Code Example #1
File: dvframe.cpp Project: 6301158/ofx-dev
int DVFrame::ExtractYUV( void * yuv )
{
	unsigned char * pixels[ 3 ];
	int pitches[ 3 ];

	pixels[ 0 ] = ( unsigned char* ) yuv;
	pitches[ 0 ] = decoder->width * 2;

	dv_decode_full_frame( decoder, data, e_dv_color_yuv, pixels, pitches );
	return 0;
}
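
For orientation, here is a minimal, self-contained sketch of the call pattern shared by the examples in this listing: create a decoder, parse the frame header, then decode the full frame to packed 4:2:2 YUV. The stdin read loop, buffer sizes, and error handling are illustrative assumptions and are not taken from any of the listed projects; only the dv_decoder_new / dv_parse_header / dv_decode_full_frame / dv_decoder_free calls mirror the code shown here.

#include <stdio.h>
#include <libdv/dv.h>

/* Minimal sketch (assumptions: one raw DV frame is available on stdin;
 * 144000 bytes is a PAL frame, 120000 bytes an NTSC frame). */
int main(void)
{
    static unsigned char dv_buffer[144000];
    static unsigned char video_buffer[720 * 576 * 2];  /* big enough for PAL YUY2 */
    unsigned char *pixels[3] = { video_buffer, NULL, NULL };
    int pitches[3] = { 0, 0, 0 };
    dv_decoder_t *decoder;

    if (fread(dv_buffer, 1, sizeof(dv_buffer), stdin) < 120000)
        return 1;                           /* not even one NTSC frame */

    decoder = dv_decoder_new(0, 0, 0);      /* add_ntsc_setup, clamp_luma, clamp_chroma */
    decoder->quality = DV_QUALITY_BEST;

    if (dv_parse_header(decoder, dv_buffer) < 0) {
        dv_decoder_free(decoder);
        return 1;
    }

    pitches[0] = decoder->width * 2;        /* packed 4:2:2: two bytes per pixel */
    dv_decode_full_frame(decoder, dv_buffer, e_dv_color_yuv, pixels, pitches);

    /* video_buffer now holds one YUY2 frame of decoder->width x decoder->height */
    dv_decoder_free(decoder);
    return 0;
}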
Code Example #2
File: pdp_ieee1394l.c Project: Angeldude/pd
static int pdp_ieee1394_read_frame(t_pdp_ieee1394 *x)
{

  if (!x->x_decoder)return 0;
  if (!x->x_frame_ready) {
	//x->x_image.newimage = 0;
  }
  else {
    dv_parse_header(x->x_decoder, x->videobuf);
    dv_parse_packs (x->x_decoder, x->videobuf);
    if(dv_frame_changed(x->x_decoder)) {
      int pitches[3] = {0,0,0};
      //      pitches[0]=x_decoder->width*3; // rgb
      //      pitches[0]=x_decoder->width*((x_reqFormat==GL_RGBA)?3:2);
      pitches[0]=x->x_decoder->width*3;
      x->x_height=x->x_decoder->height;
      x->x_width=x->x_decoder->width;
      
      /* decode the DV-data to something we can handle and that is similar to the wanted format */
      //      dv_report_video_error(x_decoder, videobuf);  // do we need this ?
      // gosh, this (e_dv_color_rgb) is expensive: the decoding is done in software only...
      //      dv_decode_full_frame(x_decoder, videobuf, ((x_reqFormat==GL_RGBA)?e_dv_color_rgb:e_dv_color_yuv), &decodedbuf, pitches);
      dv_decode_full_frame(x->x_decoder, x->videobuf, e_dv_color_rgb, &x->decodedbuf, pitches);

      //     post("sampling %d", x_decoder->sampling);

      /* convert the colour-space to the one we want */
      /*
       * btw. shouldn't this be done in [pix_video] rather than here ?
       * no because [pix_video] knows nothing about the possible colourspaces in here
       */

      // letting the library do the conversion to RGB and then doing the conversion to RGBA
      // is really stupid.
      // let's do it all ourselves:
      //      if (x_reqFormat==GL_RGBA)x_image.image.fromRGB(decodedbuf); else
      //x_image.image.fromYVYU(decodedbuf);
    	process_image (x);
	
    }

    x->x_frame_ready = false;
  }
	
  return 1;
}
Code Example #3
File: dvframe.cpp Project: 6301158/ofx-dev
int DVFrame::ExtractRGB( void * rgb )
{
	unsigned char * pixels[ 3 ];
	int pitches[ 3 ];

	pixels[ 0 ] = ( unsigned char* ) rgb;
	pixels[ 1 ] = NULL;
	pixels[ 2 ] = NULL;

	pitches[ 0 ] = GetWidth() * 3;
	pitches[ 1 ] = 0;
	pitches[ 2 ] = 0;

	dv_decode_full_frame( decoder, data, e_dv_color_rgb, pixels, pitches );
	return 0;
}
Code Example #4
File: vd_libdv.c Project: 0p1pp1/mplayer
// decode a frame
static mp_image_t* decode(sh_video_t *sh,void* data,int len,int flags)
{
   mp_image_t* mpi;
   dv_decoder_t *decoder=sh->context;

   if(len<=0 || (flags&3)){
//      fprintf(stderr,"decode() (rawdv) SKIPPED\n");
      return NULL; // skipped frame
   }

   dv_parse_header(decoder, data);

   mpi=mpcodecs_get_image(sh, MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE, sh->disp_w, sh->disp_h);

   if(!mpi){	// temporary!
      fprintf(stderr,"couldn't allocate image for stderr codec\n");
      return NULL;
   }

   dv_decode_full_frame(decoder, data, e_dv_color_yuv, mpi->planes, mpi->stride);

   return mpi;
}
Code Example #5
File: libdv.c Project: Cuchulain/cinelerra
int dv_read_video(dv_t *dv, 
		unsigned char **output_rows, 
		unsigned char *data, 
		long bytes,
		int color_model)
{
	int dif = 0;
	int lost_coeffs = 0;
	long offset = 0;
	int isPAL = 0;
	int is61834 = 0;
	int numDIFseq;
	int ds;
	int i, v, b, m;
	dv_block_t *bl;
	long mb_offset;
	dv_sample_t sampling;
	dv_macroblock_t *mb;
	int pixel_size;
	int pitches[3];
	int use_temp = color_model != BC_YUV422;
	unsigned char *pixels[3];

//printf("dv_read_video 1 %d\n", color_model);
	pthread_mutex_lock(&dv_lock);
	switch(bytes)
	{
		case DV_PAL_SIZE:
			break;
		case DV_NTSC_SIZE:
			break;
		default:
			/* unknown frame size: release the lock before bailing out */
			pthread_mutex_unlock(&dv_lock);
			return 1;
	}

	if(data[0] != 0x1f)
	{
		/* not a DV frame header: release the lock before bailing out */
		pthread_mutex_unlock(&dv_lock);
		return 1;
	}

	pitches[0] = DV_WIDTH * 2;
	pitches[1] = 0;
	pitches[2] = 0;
	pixels[1] = 0;
	pixels[2] = 0;

	dv_parse_header(dv->decoder, data);

	if(!use_temp)
	{
//printf("dv_read_video 1\n");
		pixels[0] = output_rows[0];
		dv_decode_full_frame(dv->decoder, 
			data, 
			e_dv_color_yuv, 
			output_rows, 
			pitches);
//printf("dv_read_video 2\n");
	}
	else
	{
		unsigned char *temp_rows[DV_HEIGHT];
		if(!dv->temp_video)
			dv->temp_video = calloc(1, DV_WIDTH * DV_HEIGHT * 2);

		for(i = 0; i < DV_HEIGHT; i++)
		{
			temp_rows[i] = dv->temp_video + i * DV_WIDTH * 2;
		}

		pixels[0] = dv->temp_video;
//printf("dv_read_video 3 %p\n", data);
		dv_decode_full_frame(dv->decoder, 
			data, 
			e_dv_color_yuv, 
			pixels, 
			pitches);
//printf("dv_read_video 4\n");

		cmodel_transfer(output_rows, 
			temp_rows,
			output_rows[0],
			output_rows[1],
			output_rows[2],
			0,
			0,
			0,
			0, 
			0, 
			DV_WIDTH, 
			dv->decoder->height,
			0, 
			0, 
			DV_WIDTH, 
			dv->decoder->height,
			BC_YUV422, 
			color_model,
			0,
			DV_WIDTH,
			DV_WIDTH);
	}
	dv->decoder->prev_frame_decoded = 1;
	pthread_mutex_unlock(&dv_lock);
	return 0;
}
Code Example #6
long int lp_libdv_in::read_samples(float *buffer, long int len)
{
	int audio_decoded = 0, file_readen = 0;
	int i, channel;
//	std::cerr << "lp_libdv_in::" << __FUNCTION__ << " called\n";

	// TESTS
	len = len/2;

  /* Interleave the audio into a single buffer */
/*  for (i = 0, samples = dv_get_num_samples (dv), channels = dv_get_num_channels (dv);
       i < samples; i++) {
    for (ch = 0; ch < channels; ch++) {
      oss -> buffer [j++] = out [ch] [i];
    }
  }
*/

	// Check if decoding is required
	//while(pv_audio_num_ready <= len){
	while(pv_lp_audio_buffer.left() <= len){
		file_readen = read(pv_fd, pv_file_buffer, 144000); // FIXME: len to read ?
		// decode a video frame and display - NOTE this part must move to another class later...
		dv_decode_full_frame(pv_decoder, pv_file_buffer, pv_color_space, pv_sdl_out->pb_pixels, pv_sdl_out->pb_pitches);
		pv_sdl_out->display();
		// decode audio
		dv_decode_full_audio(pv_decoder, pv_file_buffer, pv_dv_audio_buffers);
		audio_decoded = dv_get_num_samples(pv_decoder);
		// Copy to ready buffer
		
		//for(i=0; i<audio_decoded; i++){
			for(channel = 0; channel < pv_audio_channels; channel++){
				// write to ready buffer from start position
				//pv_audio_ready_buffer[pv_audio_decoder_start+channel+i*pv_audio_channels] = pv_dv_audio_buffers[channel][i];
				pv_lp_audio_buffer.put(pv_dv_audio_buffers[channel], audio_decoded / pv_audio_channels);
			}
		//}
		pv_audio_num_ready = pv_audio_num_ready + audio_decoded;
		// update start pos
		pv_audio_decoder_start = pv_audio_decoder_start + audio_decoded;
	}

	pv_lp_audio_buffer.get(pv_audio_ready_buffer, len);

	float tmp;
	// Copy needed to output buffer
	for(i=0; i<len; i++){
		//buffer[i] = (float)(pv_audio_ready_buffer[pv_audio_decoder_start+i] / 32768);
		//buffer[i] = (float)(pv_audio_ready_buffer[pv_audio_decoder_start+i] / 4000);
		tmp = (float)pv_audio_ready_buffer[i];
		buffer[i] = tmp / 6000.0;
		//buffer[i] = (float)(pv_audio_ready_buffer[i] / 6000);
	}
	// update start pos
	pv_audio_consumer_start = pv_audio_decoder_start + len;

	// Compute how many samples were consumed
	pv_audio_num_ready = pv_audio_num_ready - len;

	// Move the remainder to the start of the pv_audio_ready buffer
/*	if(pv_audio_num_ready > 0){
		for(i=0; i<pv_audio_num_ready; i++){
			pv_audio_ready_buffer[i] = pv_audio_ready_buffer[pv_audio_decoder_start+i];
		}
		// reset positions
		pv_audio_decoder_start = 0;
		pv_audio_consumer_start = 0;
	}
*/
//	std::cout << "Decoded: " << pv_audio_num_ready << " - consumed: " << len << " - ready: " << pv_audio_num_ready << "\n";
//	std::cout << "Decoder start: " << pv_audio_decoder_start << " - consumer start: " << pv_audio_consumer_start << "\n\n";

	// THIS is wrong!
	//return file_readen;
	return len;
}
Code Example #7
File: gstdvdec.c Project: slkwyy/gst-plugins-good
static GstFlowReturn
gst_dvdec_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
{
  GstDVDec *dvdec;
  guint8 *inframe;
  guint8 *outframe_ptrs[3];
  gint outframe_pitches[3];
  GstMapInfo map;
  GstVideoFrame frame;
  GstBuffer *outbuf;
  GstFlowReturn ret = GST_FLOW_OK;
  guint length;
  guint64 cstart = GST_CLOCK_TIME_NONE, cstop = GST_CLOCK_TIME_NONE;
  gboolean PAL, wide;

  dvdec = GST_DVDEC (parent);

  gst_buffer_map (buf, &map, GST_MAP_READ);
  inframe = map.data;

  /* buffer should be at least the size of one NTSC frame, this should
   * be enough to decode the header. */
  if (G_UNLIKELY (map.size < NTSC_BUFFER))
    goto wrong_size;

  /* preliminary dropping. unref and return if outside of configured segment */
  if ((dvdec->segment.format == GST_FORMAT_TIME) &&
      (!(gst_segment_clip (&dvdec->segment, GST_FORMAT_TIME,
                  GST_BUFFER_TIMESTAMP (buf),
                  GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf),
                  &cstart, &cstop))))
    goto dropping;

  if (G_UNLIKELY (dv_parse_header (dvdec->decoder, inframe) < 0))
    goto parse_header_error;

  /* get size */
  PAL = dv_system_50_fields (dvdec->decoder);
  wide = dv_format_wide (dvdec->decoder);

  /* check the buffer is of right size after we know if we are
   * dealing with PAL or NTSC */
  length = (PAL ? PAL_BUFFER : NTSC_BUFFER);
  if (G_UNLIKELY (map.size < length))
    goto wrong_size;

  dv_parse_packs (dvdec->decoder, inframe);

  if (dvdec->video_offset % dvdec->drop_factor != 0)
    goto skip;

  /* renegotiate on change */
  if (PAL != dvdec->PAL || wide != dvdec->wide) {
    dvdec->src_negotiated = FALSE;
    dvdec->PAL = PAL;
    dvdec->wide = wide;
  }

  dvdec->height = (dvdec->PAL ? PAL_HEIGHT : NTSC_HEIGHT);

  dvdec->interlaced = !dv_is_progressive (dvdec->decoder);

  /* negotiate if not done yet */
  if (!dvdec->src_negotiated) {
    if (!gst_dvdec_src_negotiate (dvdec))
      goto not_negotiated;
  }

  if (gst_pad_check_reconfigure (dvdec->srcpad)) {
    GstCaps *caps;

    caps = gst_pad_get_current_caps (dvdec->srcpad);
    if (!caps)
      goto not_negotiated;

    gst_dvdec_negotiate_pool (dvdec, caps, &dvdec->vinfo);
    gst_caps_unref (caps);
  }

  if (dvdec->need_segment) {
    gst_pad_push_event (dvdec->srcpad, gst_event_new_segment (&dvdec->segment));
    dvdec->need_segment = FALSE;
  }

  ret = gst_buffer_pool_acquire_buffer (dvdec->pool, &outbuf, NULL);
  if (G_UNLIKELY (ret != GST_FLOW_OK))
    goto no_buffer;

  gst_video_frame_map (&frame, &dvdec->vinfo, outbuf, GST_MAP_WRITE);

  outframe_ptrs[0] = GST_VIDEO_FRAME_COMP_DATA (&frame, 0);
  outframe_pitches[0] = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 0);

  /* the rest only matters for YUY2 */
  if (dvdec->bpp < 3) {
    outframe_ptrs[1] = GST_VIDEO_FRAME_COMP_DATA (&frame, 1);
    outframe_ptrs[2] = GST_VIDEO_FRAME_COMP_DATA (&frame, 2);

    outframe_pitches[1] = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 1);
    outframe_pitches[2] = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 2);
  }

  GST_DEBUG_OBJECT (dvdec, "decoding and pushing buffer");
  dv_decode_full_frame (dvdec->decoder, inframe,
      e_dv_color_yuv, outframe_ptrs, outframe_pitches);

  gst_video_frame_unmap (&frame);

  GST_BUFFER_FLAG_UNSET (outbuf, GST_VIDEO_BUFFER_FLAG_TFF);

  GST_BUFFER_OFFSET (outbuf) = GST_BUFFER_OFFSET (buf);
  GST_BUFFER_OFFSET_END (outbuf) = GST_BUFFER_OFFSET_END (buf);

  /* FIXME : Compute values when using non-TIME segments,
   * but for the moment make sure we at least don't set bogus values
   */
  if (GST_CLOCK_TIME_IS_VALID (cstart)) {
    GST_BUFFER_TIMESTAMP (outbuf) = cstart;
    if (GST_CLOCK_TIME_IS_VALID (cstop))
      GST_BUFFER_DURATION (outbuf) = cstop - cstart;
  }

  ret = gst_pad_push (dvdec->srcpad, outbuf);

skip:
  dvdec->video_offset++;

done:
  gst_buffer_unmap (buf, &map);
  gst_buffer_unref (buf);

  return ret;

  /* ERRORS */
wrong_size:
  {
    GST_ELEMENT_ERROR (dvdec, STREAM, DECODE,
        (NULL), ("Input buffer too small"));
    ret = GST_FLOW_ERROR;
    goto done;
  }
parse_header_error:
  {
    GST_ELEMENT_ERROR (dvdec, STREAM, DECODE,
        (NULL), ("Error parsing DV header"));
    ret = GST_FLOW_ERROR;
    goto done;
  }
not_negotiated:
  {
    GST_DEBUG_OBJECT (dvdec, "could not negotiate output");
    ret = GST_FLOW_NOT_NEGOTIATED;
    goto done;
  }
no_buffer:
  {
    GST_DEBUG_OBJECT (dvdec, "could not allocate buffer");
    goto done;
  }

dropping:
  {
    GST_DEBUG_OBJECT (dvdec,
        "dropping buffer since it's out of the configured segment");
    goto done;
  }
}
Code Example #8
int main( int argc, char **argv)
{
    int infile = 0;
    unsigned char dv_buffer[144000];
    unsigned char video_buffer[720 * 576 * 3];
    int16_t *audio_bufs[4];
    dv_decoder_t *decoder = NULL;
    dv_encoder_t *encoder = NULL;
    int pitches[3];
    unsigned char *pixels[3];
    int i = 0, j;
    int isPAL = FALSE;

    pitches[0] = 720 * 2;
    pixels[0] = video_buffer;

    for(i = 0; i < 4; i++) {
        audio_bufs[i] = malloc(DV_AUDIO_MAX_SAMPLES*sizeof(int16_t));
    }

    /* assume NTSC for now, switch to PAL later if needed */
    decoder = dv_decoder_new(FALSE, FALSE, FALSE);
    encoder = dv_encoder_new(FALSE, FALSE, FALSE);

    decoder->quality = DV_QUALITY_BEST;
    encoder->vlc_encode_passes = 3;
    encoder->static_qno = 0;
    encoder->force_dct = DV_DCT_AUTO;

    i = 0;
    while (read_frame(stdin, dv_buffer, &isPAL)) {
        dv_parse_header(decoder, dv_buffer);
        if (isPAL != encoder->isPAL && isPAL == TRUE) {
            decoder->clamp_luma = FALSE;
            decoder->clamp_chroma = FALSE;
            encoder->clamp_luma = FALSE;
            encoder->clamp_chroma = FALSE;
            dv_reconfigure(FALSE, FALSE);
        } else if (isPAL != encoder->isPAL) {
            decoder->clamp_luma = TRUE;
            decoder->clamp_chroma = TRUE;
            decoder->add_ntsc_setup = TRUE;
            encoder->clamp_luma = TRUE;
            encoder->clamp_chroma = TRUE;
            encoder->rem_ntsc_setup = TRUE;
            dv_reconfigure(TRUE, TRUE);
        }
        encoder->isPAL = isPAL;
        encoder->is16x9 = (dv_format_wide(decoder)>0);
        dv_decode_full_audio(decoder, dv_buffer, audio_bufs);
        for (j = 0; j < TIMES; j++) {
            dv_decode_full_frame(decoder, dv_buffer, e_dv_color_yuv,
                                 pixels, pitches);

            dv_encode_full_frame(encoder, pixels, e_dv_color_yuv, dv_buffer);
        }
        dv_encode_full_audio(encoder, audio_bufs, 2, 48000, dv_buffer);
        fwrite(dv_buffer, 1, (isPAL ? 144000 : 120000), stdout);
    }

    close(infile);

    for(i=0; i < 4; i++) free(audio_bufs[i]);
    dv_decoder_free(decoder);
    dv_encoder_free(encoder);

    return 0;
}
Code Example #9
File: qtdv.c Project: knutj/cinelerra
static int decode(quicktime_t *file, unsigned char **row_pointers, int track)
{
    long bytes;
    quicktime_video_map_t *vtrack = &(file->vtracks[track]);
    quicktime_dv_codec_t *codec = ((quicktime_codec_t*)vtrack->codec)->priv;
    int width = vtrack->track->tkhd.track_width;
    int height = vtrack->track->tkhd.track_height;
    int result = 0;
    int i;
    int decode_colormodel = 0;
    int pitches[3] = { 720 * 2, 0, 0 };


    quicktime_set_video_position(file, vtrack->current_position, track);
    bytes = quicktime_frame_size(file, vtrack->current_position, track);
    result = !quicktime_read_data(file, (char*)codec->data, bytes);

    if( codec->dv_decoder && codec->parameters_changed )
    {
        dv_decoder_free( codec->dv_decoder );
        codec->dv_decoder = NULL;
        codec->parameters_changed = 0;
    }

    if( ! codec->dv_decoder )
    {
        pthread_mutex_lock( &libdv_init_mutex );


        codec->dv_decoder = dv_decoder_new( codec->add_ntsc_setup,
                                            codec->clamp_luma,
                                            codec->clamp_chroma );
        codec->dv_decoder->prev_frame_decoded = 0;

        codec->parameters_changed = 0;
        pthread_mutex_unlock( &libdv_init_mutex );
    }

    if(codec->dv_decoder)
    {
        int is_sequential =
            check_sequentiality( row_pointers,
                                 720 * cmodel_calculate_pixelsize(file->color_model),
                                 file->out_h );

        codec->dv_decoder->quality = codec->decode_quality;

        dv_parse_header( codec->dv_decoder, codec->data );

// Libdv improperly decodes RGB colormodels.
        if((file->color_model == BC_YUV422 ||
                file->color_model == BC_RGB888) &&
                file->in_x == 0 &&
                file->in_y == 0 &&
                file->in_w == width &&
                file->in_h == height &&
                file->out_w == width &&
                file->out_h == height &&
                is_sequential)
        {
            if( file->color_model == BC_YUV422 )
            {
                pitches[0] = 720 * 2;
                dv_decode_full_frame( codec->dv_decoder, codec->data,
                                      e_dv_color_yuv, row_pointers,
                                      pitches );
            }
            else if( file->color_model == BC_RGB888)
            {
                pitches[0] = 720 * 3;
                dv_decode_full_frame( codec->dv_decoder, codec->data,
                                      e_dv_color_rgb, row_pointers,
                                      pitches );
            }
        }
        else
        {
            if(!codec->temp_frame)
            {
                codec->temp_frame = malloc(720 * 576 * 2);
                codec->temp_rows = malloc(sizeof(unsigned char*) * 576);
                for(i = 0; i < 576; i++)
                    codec->temp_rows[i] = codec->temp_frame + 720 * 2 * i;
            }

            decode_colormodel = BC_YUV422;
            pitches[0] = 720 * 2;
            dv_decode_full_frame( codec->dv_decoder, codec->data,
                                  e_dv_color_yuv, codec->temp_rows,
                                  pitches );




            cmodel_transfer(row_pointers,
                            codec->temp_rows,
                            row_pointers[0],
                            row_pointers[1],
                            row_pointers[2],
                            codec->temp_rows[0],
                            codec->temp_rows[1],
                            codec->temp_rows[2],
                            file->in_x,
                            file->in_y,
                            file->in_w,
                            file->in_h,
                            0,
                            0,
                            file->out_w,
                            file->out_h,
                            decode_colormodel,
                            file->color_model,
                            0,
                            width,
                            file->out_w);
        }
    }

//printf(__FUNCTION__ " 2\n");
    return result;
}
Code Example #10
File: lav_common.c Project: AquaSoftGmbH/mjpeg
/*
 * readframe - read jpeg or dv frame into yuv buffer
 *
 * returns:
 *	0   success
 *	1   fatal error
 *	2   corrupt data encountered; 
 *		decoding can continue, but this frame may be damaged 
 */
int readframe(int numframe, 
	      uint8_t *frame[],
	      LavParam *param,
	      EditList el)
{
  int len, i, res, data_format;
  uint8_t *frame_tmp;
  int warn;
  warn = 0;

  if (MAX_JPEG_LEN < el.max_frame_size) {
    mjpeg_error_exit1( "Max size of JPEG frame = %ld: too big",
		       el.max_frame_size);
  }
  
  len = el_get_video_frame(jpeg_data, numframe, &el);
  data_format = el_video_frame_data_format(numframe, &el);
  
  switch(data_format) {

  case DATAFORMAT_DV2 :
#ifndef HAVE_LIBDV
    mjpeg_error("DV input was not configured at compile time");
    res = 1;
#else
    mjpeg_debug("DV frame %d   len %d",numframe,len);
    res = 0;
    dv_parse_header(decoder, jpeg_data);
    switch(decoder->sampling) {
    case e_dv_sample_420:
      /* libdv decodes PAL DV directly as planar YUV 420
       * (YV12 or 4CC 0x32315659) if configured with the flag
       * --with-pal-yuv=YV12 which is not (!) the default
       */
      if (libdv_pal_yv12 == 1) {
	pitches[0] = decoder->width;
	pitches[1] = decoder->width / 2;
	pitches[2] = decoder->width / 2;
	if (pitches[0] != param->output_width ||
	    pitches[1] != param->chroma_width) {
	  mjpeg_error("for DV 4:2:0 only full width output is supported");
	  res = 1;
	} else {
	  dv_decode_full_frame(decoder, jpeg_data, e_dv_color_yuv,
			       frame, (int *)pitches);
	  /* swap the U and V components */
	  frame_tmp = frame[2];
	  frame[2] = frame[1];
	  frame[1] = frame_tmp;
	}
	break;
      }
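    /* fall through: without --with-pal-yuv=YV12, PAL 4:2:0 frames also take the packed 4:2:2 path below */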
    case e_dv_sample_411:
    case e_dv_sample_422:
      /* libdv decodes NTSC DV (native 411) and by default also PAL
       * DV (native 420) as packed YUV 422 (YUY2 or 4CC 0x32595559)
       * where the U and V information is repeated.  This can be
       * transformed to planar 420 (YV12 or 4CC 0x32315659).
       * For NTSC DV this transformation is lossy.
       */
      pitches[0] = decoder->width * 2;
      pitches[1] = 0;
      pitches[2] = 0;
      if (decoder->width != param->output_width) {
	mjpeg_error("for DV only full width output is supported");
	res = 1;
      } else {
	dv_decode_full_frame(decoder, jpeg_data, e_dv_color_yuv,
			     dv_frame, (int *)pitches);
	frame_YUV422_to_planar(frame, dv_frame[0],
			       decoder->width,	decoder->height,
			       param->chroma);
      }
      break;
    default:
      res = 1;
      break;
    }
#endif /* HAVE_LIBDV */
    break;

  case DATAFORMAT_YUV420 :
  case DATAFORMAT_YUV422 :
    mjpeg_debug("raw YUV frame %d   len %d",numframe,len);
    frame_tmp = jpeg_data;
    memcpy(frame[0], frame_tmp, param->luma_size);
    frame_tmp += param->luma_size;
    memcpy(frame[1], frame_tmp, param->chroma_size);
    frame_tmp += param->chroma_size;
    memcpy(frame[2], frame_tmp, param->chroma_size);
    res = 0;
    break;

  default:
    mjpeg_debug("MJPEG frame %d   len %d",numframe,len);
    res = decode_jpeg_raw(jpeg_data, len, el.video_inter,
			  param->chroma,
			  param->output_width, param->output_height,
			  frame[0], frame[1], frame[2]);
  }
  
  if (res < 0) {
    mjpeg_warn( "Fatal Error Decoding Frame %d", numframe);
    return 1;
  } else if (res == 1) {
    mjpeg_warn( "Decoding of Frame %d failed", numframe);
    warn = 1;
    res = 0;
  }
  
  
  if (param->mono) {
    for (i = 0;
	 i < param->chroma_size;
	 ++i) {
      frame[1][i] = 0x80;
      frame[2][i] = 0x80;
    }
  }

  if(warn)
	  return 2;
  else
	  return 0;
}
Code Example #11
File: gstdvdec.c Project: prajnashi/gst-plugins-good
static GstFlowReturn
gst_dvdec_chain (GstPad * pad, GstBuffer * buf)
{
  GstDVDec *dvdec;
  guint8 *inframe;
  guint8 *outframe;
  guint8 *outframe_ptrs[3];
  gint outframe_pitches[3];
  GstBuffer *outbuf;
  GstFlowReturn ret = GST_FLOW_OK;
  guint length;
  gint64 cstart, cstop;
  gboolean PAL, wide;

  dvdec = GST_DVDEC (gst_pad_get_parent (pad));
  inframe = GST_BUFFER_DATA (buf);

  /* buffer should be at least the size of one NTSC frame, this should
   * be enough to decode the header. */
  if (G_UNLIKELY (GST_BUFFER_SIZE (buf) < NTSC_BUFFER))
    goto wrong_size;

  /* preliminary dropping. unref and return if outside of configured segment */
  if ((dvdec->segment->format == GST_FORMAT_TIME) &&
      (!(gst_segment_clip (dvdec->segment, GST_FORMAT_TIME,
                  GST_BUFFER_TIMESTAMP (buf),
                  GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf),
                  &cstart, &cstop))))
    goto dropping;

  if (G_UNLIKELY (dv_parse_header (dvdec->decoder, inframe) < 0))
    goto parse_header_error;

  /* get size */
  PAL = dv_system_50_fields (dvdec->decoder);
  wide = dv_format_wide (dvdec->decoder);

  /* check the buffer is of right size after we know if we are
   * dealing with PAL or NTSC */
  length = (PAL ? PAL_BUFFER : NTSC_BUFFER);
  if (G_UNLIKELY (GST_BUFFER_SIZE (buf) < length))
    goto wrong_size;

  dv_parse_packs (dvdec->decoder, inframe);

  if (dvdec->video_offset % dvdec->drop_factor != 0)
    goto skip;

  /* renegotiate on change */
  if (PAL != dvdec->PAL || wide != dvdec->wide) {
    dvdec->src_negotiated = FALSE;
    dvdec->PAL = PAL;
    dvdec->wide = wide;
  }

  dvdec->height = (dvdec->PAL ? PAL_HEIGHT : NTSC_HEIGHT);


  /* negotiate if not done yet */
  if (!dvdec->src_negotiated) {
    if (!gst_dvdec_src_negotiate (dvdec))
      goto not_negotiated;
  }

  ret =
      gst_pad_alloc_buffer_and_set_caps (dvdec->srcpad, 0,
      (720 * dvdec->height) * dvdec->bpp,
      GST_PAD_CAPS (dvdec->srcpad), &outbuf);
  if (G_UNLIKELY (ret != GST_FLOW_OK))
    goto no_buffer;

  outframe = GST_BUFFER_DATA (outbuf);

  outframe_ptrs[0] = outframe;
  outframe_pitches[0] = 720 * dvdec->bpp;

  /* the rest only matters for YUY2 */
  if (dvdec->bpp < 3) {
    outframe_ptrs[1] = outframe_ptrs[0] + 720 * dvdec->height;
    outframe_ptrs[2] = outframe_ptrs[1] + 360 * dvdec->height;

    outframe_pitches[1] = dvdec->height / 2;
    outframe_pitches[2] = outframe_pitches[1];
  }

  GST_DEBUG_OBJECT (dvdec, "decoding and pushing buffer");
  dv_decode_full_frame (dvdec->decoder, inframe,
      e_dv_color_yuv, outframe_ptrs, outframe_pitches);

  GST_BUFFER_OFFSET (outbuf) = GST_BUFFER_OFFSET (buf);
  GST_BUFFER_OFFSET_END (outbuf) = GST_BUFFER_OFFSET_END (buf);
  GST_BUFFER_TIMESTAMP (outbuf) = cstart;
  GST_BUFFER_DURATION (outbuf) = cstop - cstart;

  ret = gst_pad_push (dvdec->srcpad, outbuf);

skip:
  dvdec->video_offset++;

done:
  gst_buffer_unref (buf);
  gst_object_unref (dvdec);

  return ret;

  /* ERRORS */
wrong_size:
  {
    GST_ELEMENT_ERROR (dvdec, STREAM, DECODE,
        (NULL), ("Input buffer too small"));
    ret = GST_FLOW_ERROR;
    goto done;
  }
parse_header_error:
  {
    GST_ELEMENT_ERROR (dvdec, STREAM, DECODE,
        (NULL), ("Error parsing DV header"));
    ret = GST_FLOW_ERROR;
    goto done;
  }
not_negotiated:
  {
    GST_DEBUG_OBJECT (dvdec, "could not negotiate output");
    ret = GST_FLOW_NOT_NEGOTIATED;
    goto done;
  }
no_buffer:
  {
    GST_DEBUG_OBJECT (dvdec, "could not allocate buffer");
    goto done;
  }

dropping:
  {
    GST_DEBUG_OBJECT (dvdec,
        "dropping buffer since it's out of the configured segment");
    goto done;
  }
}