static int rawdv_check_file(demuxer_t *demuxer)
{
   unsigned char tmp_buffer[DV_PAL_FRAME_SIZE];
   int bytes_read=0;
   int result=0;
   dv_decoder_t *td;

   mp_msg(MSGT_DEMUX,MSGL_V,"Checking for DV\n");

   bytes_read=stream_read(demuxer->stream,tmp_buffer,DV_PAL_FRAME_SIZE);
   if ((bytes_read!=DV_PAL_FRAME_SIZE) && (bytes_read!=DV_NTSC_FRAME_SIZE))
      return 0;

   td=dv_decoder_new(TRUE,TRUE,FALSE);
   if (!td)
      return 0;

   td->quality=DV_QUALITY_BEST;
   result=dv_parse_header(td, tmp_buffer);
   if (result<0)
   {
      dv_decoder_free(td);
      return 0;
   }

   if ((( td->num_dif_seqs==10) || (td->num_dif_seqs==12))
       && (td->width==720)
       && ((td->height==576) || (td->height==480)))
      result=1;
   dv_decoder_free(td);
   if (result)
      return DEMUXER_TYPE_RAWDV;
   else
      return 0;
}
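
The check above is the pattern every example in this collection builds on: read one complete frame, hand it to dv_parse_header(), then read the stream properties straight off the decoder. As a reference point, here is a minimal, self-contained sketch of that probe; the probe_dv() helper is hypothetical, and the hard-coded 144000/120000 byte sizes are simply the raw PAL/NTSC DV frame sizes that the DV_PAL_FRAME_SIZE/DV_NTSC_FRAME_SIZE constants above stand for.

#include <stdio.h>
#include <libdv/dv.h>

#define PROBE_PAL_FRAME_SIZE  144000   /* raw PAL DV frame  */
#define PROBE_NTSC_FRAME_SIZE 120000   /* raw NTSC DV frame */

/* Read the first frame of a raw .dv file and print its properties.
 * Returns 0 on success, -1 on failure. */
static int probe_dv(const char *path)
{
   unsigned char frame[PROBE_PAL_FRAME_SIZE];
   dv_decoder_t *dec;
   FILE *f = fopen(path, "rb");
   size_t n;

   if (!f)
      return -1;
   n = fread(frame, 1, sizeof(frame), f);
   fclose(f);
   if (n != PROBE_PAL_FRAME_SIZE && n != PROBE_NTSC_FRAME_SIZE)
      return -1;                       /* not a whole DV frame */

   dec = dv_decoder_new(1, 1, 0);      /* add_ntsc_setup, clamp_luma, clamp_chroma */
   if (!dec)
      return -1;
   dec->quality = DV_QUALITY_BEST;

   if (dv_parse_header(dec, frame) < 0) {
      dv_decoder_free(dec);
      return -1;
   }

   printf("%dx%d %s, frame_size=%d, %d audio ch @ %d Hz\n",
          dec->width, dec->height,
          dv_is_PAL(dec) ? "PAL" : "NTSC",
          dec->frame_size,
          dec->audio->num_channels, dec->audio->frequency);

   dv_decoder_free(dec);
   return 0;
}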
Example #2
static int decode_audio(sh_audio_t *audio, unsigned char *buf, int minlen, int maxlen)
{
   int len=0;
   dv_decoder_t* decoder=audio->context;  //global_rawdv_decoder;
   unsigned char* dv_audio_frame=NULL;
   int xx=ds_get_packet(audio->ds,&dv_audio_frame);
   if(xx<=0 || !dv_audio_frame) return 0; // EOF?

   dv_parse_header(decoder, dv_audio_frame);

   if(xx!=decoder->frame_size)
       mp_tmsg(MSGT_GLOBAL,MSGL_WARN,"[AD_LIBDV] Warning! Audio framesize differs! read=%d  hdr=%d.\n",
           xx, decoder->frame_size);

   if (dv_decode_full_audio(decoder, dv_audio_frame,(int16_t**) audioBuffers))
   {
      /* Interleave the audio into a single buffer */
      int i=0;
      int16_t *bufP=(int16_t*)buf;

//      printf("samples=%d/%d  chans=%d  mem=%d  \n",decoder->audio->samples_this_frame,DV_AUDIO_MAX_SAMPLES,
//          decoder->audio->num_channels, decoder->audio->samples_this_frame*decoder->audio->num_channels*2);

//   return (44100/30)*4;

      for (i=0; i < decoder->audio->samples_this_frame; i++)
      {
         int ch;
         for (ch=0; ch < decoder->audio->num_channels; ch++)
            bufP[len++] = audioBuffers[ch][i];
      }
   }
   return len*2;
}
Example #3
int dv_read_audio(dv_t *dv, 
		unsigned char *samples,
		unsigned char *data,
		long size,
		int channels,
		int bits)
{
	long current_position;
	int norm;
	int i, j;
    int audio_bytes;
	short *samples_int16 = (short*)samples;
	int samples_read;
	if(channels > 4) channels = 4;

// For some reason someone had problems with libdv's maximum audio samples
#define MAX_AUDIO_SAMPLES 2048
	if(!dv->temp_audio[0])
	{
		for(i = 0; i < 4; i++)
			dv->temp_audio[i] = calloc(1, sizeof(int16_t) * MAX_AUDIO_SAMPLES);
	}

	switch(size)
	{
		case DV_PAL_SIZE:
			norm = DV_PAL;
			break;
		case DV_NTSC_SIZE:
			norm = DV_NTSC;
			break;
		default:
			return 0;
			break;
	}

	if(data[0] != 0x1f) return 0;

	dv_parse_header(dv->decoder, data);
	dv_decode_full_audio(dv->decoder, data, dv->temp_audio);
	samples_read = dv->decoder->audio->samples_this_frame;

	for(i = 0; i < channels; i++)
	{
		for(j = 0; j < samples_read; j++)
		{
			samples_int16[i + j * channels] = dv->temp_audio[i][j];
			if(samples_int16[i + j * channels] == -0x8000)
				samples_int16[i + j * channels] = 0;
		}
	}

	return samples_read;
}
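
Both audio examples above end in the same inner loop: dv_decode_full_audio() fills up to four per-channel int16_t buffers, and the samples are then interleaved by hand into a single output buffer. A sketch of just that distilled pattern follows; the helper name is made up, the frame is assumed to have already passed the size check, the four channel buffers are assumed to hold DV_AUDIO_MAX_SAMPLES entries each, and (following Example #2) a zero return from dv_decode_full_audio() is treated as failure.

/* Decode one DV frame's audio and interleave it into 'out'.
 * Returns the number of int16_t values written (samples * channels). */
static int interleave_dv_audio(dv_decoder_t *decoder,
		unsigned char *frame,
		int16_t *audio_bufs[4],
		int16_t *out)
{
	int n = 0, i, ch;

	dv_parse_header(decoder, frame);
	if (!dv_decode_full_audio(decoder, frame, audio_bufs))
		return 0;

	for (i = 0; i < decoder->audio->samples_this_frame; i++)
		for (ch = 0; ch < decoder->audio->num_channels; ch++)
			out[n++] = audio_bufs[ch][i];

	return n;
}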
Example #4
static int pdp_ieee1394_read_frame(t_pdp_ieee1394 *x)
{

  if (!x->x_decoder)return 0;
  if (!x->x_frame_ready) {
	//x->x_image.newimage = 0;
  }
  else {
    dv_parse_header(x->x_decoder, x->videobuf);
    dv_parse_packs (x->x_decoder, x->videobuf);
    if(dv_frame_changed(x->x_decoder)) {
      int pitches[3] = {0,0,0};
      //      pitches[0]=x_decoder->width*3; // rgb
      //      pitches[0]=x_decoder->width*((x_reqFormat==GL_RGBA)?3:2);
      pitches[0]=x->x_decoder->width*3;
      x->x_height=x->x_decoder->height;
      x->x_width=x->x_decoder->width;
      
      /* decode the DV-data to something we can handle and that is similar to the wanted format */
      //      dv_report_video_error(x_decoder, videobuf);  // do we need this ?
      // gosh, this (e_dv_color_rgb) is expensive: the decoding is done in software only...
      //      dv_decode_full_frame(x_decoder, videobuf, ((x_reqFormat==GL_RGBA)?e_dv_color_rgb:e_dv_color_yuv), &decodedbuf, pitches);
      dv_decode_full_frame(x->x_decoder, x->videobuf, e_dv_color_rgb, &x->decodedbuf, pitches);

      //     post("sampling %d", x_decoder->sampling);

      /* convert the colour-space to the one we want */
      /*
       * btw. shouldn't this be done in [pix_video] rather than here ?
       * no because [pix_video] knows nothing about the possible colourspaces in here
       */

      // letting the library do the conversion to RGB and then doing the conversion to RGBA
      // is really stupid.
      // let's do it all ourselves:
      //      if (x_reqFormat==GL_RGBA)x_image.image.fromRGB(decodedbuf); else
      //x_image.image.fromYVYU(decodedbuf);
    	process_image (x);
	
    }

    x->x_frame_ready = false;
  }
	
  return 1;
}
Example #5
// decode a frame
static mp_image_t* decode(sh_video_t *sh,void* data,int len,int flags)
{
   mp_image_t* mpi;
   dv_decoder_t *decoder=sh->context;

   if(len<=0 || (flags&3)){
//      fprintf(stderr,"decode() (rawdv) SKIPPED\n");
      return NULL; // skipped frame
   }

   dv_parse_header(decoder, data);

   mpi=mpcodecs_get_image(sh, MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE, sh->disp_w, sh->disp_h);

   if(!mpi){	// temporary!
      fprintf(stderr,"couldn't allocate image for stderr codec\n");
      return NULL;
   }

   dv_decode_full_frame(decoder, data, e_dv_color_yuv, mpi->planes, mpi->stride);

   return mpi;
}
static demuxer_t* demux_open_rawdv(demuxer_t* demuxer)
{
   unsigned char dv_frame[DV_PAL_FRAME_SIZE];
   sh_video_t *sh_video = NULL;
   rawdv_frames_t *frames = malloc(sizeof(rawdv_frames_t));
   dv_decoder_t *dv_decoder=NULL;

   mp_msg(MSGT_DEMUXER,MSGL_V,"demux_open_rawdv() end_pos %"PRId64"\n",(int64_t)demuxer->stream->end_pos);

   // go back to the beginning
   stream_reset(demuxer->stream);
   stream_seek(demuxer->stream, 0);

   //get the first frame
   stream_read(demuxer->stream, dv_frame, DV_PAL_FRAME_SIZE);

   //read params from this frame
   dv_decoder=dv_decoder_new(TRUE,TRUE,FALSE);
   dv_decoder->quality=DV_QUALITY_BEST;

   if (dv_parse_header(dv_decoder, dv_frame) == -1)
   {
	   free(frames);
	   dv_decoder_free(dv_decoder);
	   return NULL;
   }

   // create a new video stream header
   sh_video = new_sh_video(demuxer, 0);
   if (!sh_video)
   {
	   free(frames);
	   dv_decoder_free(dv_decoder);
	   return NULL;
   }

   // make sure the demuxer knows about the new video stream header
   // (even though new_sh_video() ought to take care of it)
   demuxer->seekable = 1;
   demuxer->video->sh = sh_video;

   // make sure that the video demuxer stream header knows about its
   // parent video demuxer stream (this is getting wacky), or else
   // video_read_properties() will choke
   sh_video->ds = demuxer->video;

   // custom fourcc for internal MPlayer use
//   sh_video->format = mmioFOURCC('R', 'A', 'D', 'V');
   sh_video->format = mmioFOURCC('D', 'V', 'S', 'D');

   sh_video->disp_w = dv_decoder->width;
   sh_video->disp_h = dv_decoder->height;
   mp_msg(MSGT_DEMUXER,MSGL_V,"demux_open_rawdv() frame_size: %d w: %d h: %d dif_seq: %d system: %d\n",dv_decoder->frame_size,dv_decoder->width, dv_decoder->height,dv_decoder->num_dif_seqs,dv_decoder->system);

   sh_video->fps= (dv_decoder->system==e_dv_system_525_60?29.97:25);
   sh_video->frametime = 1.0/sh_video->fps;

  // emulate BITMAPINFOHEADER for win32 decoders:
  sh_video->bih=malloc(sizeof(BITMAPINFOHEADER));
  memset(sh_video->bih,0,sizeof(BITMAPINFOHEADER));
  sh_video->bih->biSize=40;
  sh_video->bih->biWidth = dv_decoder->width;
  sh_video->bih->biHeight = dv_decoder->height;
  sh_video->bih->biPlanes=1;
  sh_video->bih->biBitCount=24;
  sh_video->bih->biCompression=sh_video->format; // "DVSD"
  sh_video->bih->biSizeImage=sh_video->bih->biWidth*sh_video->bih->biHeight*3;


   frames->current_filepos=0;
   frames->current_frame=0;
   frames->frame_size=dv_decoder->frame_size;
   frames->frame_number=demuxer->stream->end_pos/frames->frame_size;

   mp_msg(MSGT_DEMUXER,MSGL_V,"demux_open_rawdv() seek to %qu, size: %d, dv_dec->frame_size: %d\n",frames->current_filepos,frames->frame_size, dv_decoder->frame_size);
    if (dv_decoder->audio != NULL && demuxer->audio->id>=-1){
       sh_audio_t *sh_audio =  new_sh_audio(demuxer, 0);
	    demuxer->audio->sh = sh_audio;
	    sh_audio->ds = demuxer->audio;
       mp_msg(MSGT_DEMUXER,MSGL_V,"demux_open_rawdv() chan: %d samplerate: %d\n",dv_decoder->audio->num_channels,dv_decoder->audio->frequency );
       // custom fourcc for internal MPlayer use
       sh_audio->format = mmioFOURCC('R', 'A', 'D', 'V');

	sh_audio->wf = malloc(sizeof(WAVEFORMATEX));
	memset(sh_audio->wf, 0, sizeof(WAVEFORMATEX));
	sh_audio->wf->wFormatTag = sh_audio->format;
	sh_audio->wf->nChannels = dv_decoder->audio->num_channels;
	sh_audio->wf->wBitsPerSample = 16;
	sh_audio->wf->nSamplesPerSec = dv_decoder->audio->frequency;
	// info about the input stream:
	sh_audio->wf->nAvgBytesPerSec = sh_video->fps*dv_decoder->frame_size;
	sh_audio->wf->nBlockAlign = dv_decoder->frame_size;

//       sh_audio->context=(void*)dv_decoder;
    }
   stream_reset(demuxer->stream);
   stream_seek(demuxer->stream, 0);
   dv_decoder_free(dv_decoder);  //we keep this in the context of both stream headers
   demuxer->priv=frames;
   return demuxer;
}
Example #7
int dv_read_video(dv_t *dv, 
		unsigned char **output_rows, 
		unsigned char *data, 
		long bytes,
		int color_model)
{
	int dif = 0;
	int lost_coeffs = 0;
	long offset = 0;
	int isPAL = 0;
	int is61834 = 0;
	int numDIFseq;
	int ds;
	int i, v, b, m;
	dv_block_t *bl;
	long mb_offset;
	dv_sample_t sampling;
	dv_macroblock_t *mb;
	int pixel_size;
	int pitches[3];
	int use_temp = color_model != BC_YUV422;
	unsigned char *pixels[3];

//printf("dv_read_video 1 %d\n", color_model);
	pthread_mutex_lock(&dv_lock);
	switch(bytes)
	{
		case DV_PAL_SIZE:
			break;
		case DV_NTSC_SIZE:
			break;
		default:
			/* unknown frame size: unlock before bailing out */
			pthread_mutex_unlock(&dv_lock);
			return 1;
	}

	if(data[0] != 0x1f)
	{
		/* not a DV header block: unlock before bailing out */
		pthread_mutex_unlock(&dv_lock);
		return 1;
	}

	pitches[0] = DV_WIDTH * 2;
	pitches[1] = 0;
	pitches[2] = 0;
	pixels[1] = 0;
	pixels[2] = 0;

	dv_parse_header(dv->decoder, data);

	if(!use_temp)
	{
//printf("dv_read_video 1\n");
		pixels[0] = output_rows[0];
		dv_decode_full_frame(dv->decoder, 
			data, 
			e_dv_color_yuv, 
			output_rows, 
			pitches);
//printf("dv_read_video 2\n");
	}
	else
	{
		unsigned char *temp_rows[DV_HEIGHT];
		if(!dv->temp_video)
			dv->temp_video = calloc(1, DV_WIDTH * DV_HEIGHT * 2);

		for(i = 0; i < DV_HEIGHT; i++)
		{
			temp_rows[i] = dv->temp_video + i * DV_WIDTH * 2;
		}

		pixels[0] = dv->temp_video;
//printf("dv_read_video 3 %p\n", data);
		dv_decode_full_frame(dv->decoder, 
			data, 
			e_dv_color_yuv, 
			pixels, 
			pitches);
//printf("dv_read_video 4\n");

		cmodel_transfer(output_rows, 
			temp_rows,
			output_rows[0],
			output_rows[1],
			output_rows[2],
			0,
			0,
			0,
			0, 
			0, 
			DV_WIDTH, 
			dv->decoder->height,
			0, 
			0, 
			DV_WIDTH, 
			dv->decoder->height,
			BC_YUV422, 
			color_model,
			0,
			DV_WIDTH,
			DV_WIDTH);
	}
	dv->decoder->prev_frame_decoded = 1;
	pthread_mutex_unlock(&dv_lock);
	return 0;
}
int lp_libdv_in::open_file(QString file)
{
	// Close opened file
	if(pv_fd > 0){
		if(close(pv_fd)<0){
			std::cerr << "lp_libdv_in::" << __FUNCTION__ << ": cannot close previous file\n";
			return -1;
		}
	}

	// convert QString to char*
	qstring_char qsc(file);
	char *tmp_path = 0;
	tmp_path = qsc.to_char();

	pv_fd = open(tmp_path, O_RDONLY);
	if(pv_fd < 0){
			std::cerr << "lp_libdv_in::" << __FUNCTION__ << ": cannot open file: " << tmp_path << std::endl;
			print_err_open(pv_fd);
			return -1;
	}

	// Parse the header 
	if((pv_file_readen = read(pv_fd, pv_file_buffer, 120000))<1){
		std::cerr << "lp_libdv_in::" << __FUNCTION__ << ": cannot parse video header - file read error\n";
		return -1;
	}
	if(dv_parse_header(pv_decoder, pv_file_buffer)<0){
		std::cerr << "lp_libdv_in::" << __FUNCTION__ << ": cannot parse video header - decoding error\n";
		return -1;
	}

	// Video formats
	pv_width = pv_decoder->width;
	pv_height = pv_decoder->height;

	// YUV format - RGB ? etc...
	switch(pv_decoder->sampling){
		case e_dv_sample_411:	// YUY2
			pv_video_format = lp_sample_411;
			break;
		case e_dv_sample_420:	// YV12 
			pv_video_format = lp_sample_420;
			break;
		case e_dv_sample_422:	// YUY2
			pv_video_format = lp_sample_422;
			break;
		default:
			fprintf(stderr, "Format YUV introuvable!\n");
			break;
	}

	// DV quality
	dv_set_quality(pv_decoder, DV_QUALITY_BEST);

	// NOTE: move this later...
	if(pv_sdl_out == 0){
		pv_sdl_out = new lp_sdl_out;
		if(pv_sdl_out == 0){
			std::cerr << "lp_libdv_in::" << __FUNCTION__ << ": cannot instantiate sdl output\n";
			return -1;
		}
	}
	if(pv_sdl_out->init(pv_width, pv_height, pv_video_format)<0){
			std::cerr << "lp_libdv_in::" << __FUNCTION__ << ": cannot initialize sdl output\n";
			return -1;
	}

	// Audio parameters
	pv_dv_audio_channels = pv_decoder->audio->num_channels;
	if(pv_dv_audio_channels > 2){
		std::cerr << "lp_libdv_in::" << __FUNCTION__ << ": dv audio stream has more than 2 channels !\n";
	}
	if(pv_dv_audio_channels < 2){
		std::cerr << "lp_libdv_in::" << __FUNCTION__ << ": dv audio stream has less than 2 channels !\n";
	}
	pv_audio_num_ready = 0;
	pv_audio_decoder_start = 0;
	pv_audio_consumer_start = 0;

	// DEBUG
	std::cout << "Ouverture de " << tmp_path << std::endl;

	if(tmp_path != 0){
		free(tmp_path);
	}

	return 0;
}
Example #9
static GstFlowReturn
gst_dvdec_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
{
  GstDVDec *dvdec;
  guint8 *inframe;
  guint8 *outframe_ptrs[3];
  gint outframe_pitches[3];
  GstMapInfo map;
  GstVideoFrame frame;
  GstBuffer *outbuf;
  GstFlowReturn ret = GST_FLOW_OK;
  guint length;
  guint64 cstart = GST_CLOCK_TIME_NONE, cstop = GST_CLOCK_TIME_NONE;
  gboolean PAL, wide;

  dvdec = GST_DVDEC (parent);

  gst_buffer_map (buf, &map, GST_MAP_READ);
  inframe = map.data;

  /* buffer should be at least the size of one NTSC frame, this should
   * be enough to decode the header. */
  if (G_UNLIKELY (map.size < NTSC_BUFFER))
    goto wrong_size;

  /* preliminary dropping. unref and return if outside of configured segment */
  if ((dvdec->segment.format == GST_FORMAT_TIME) &&
      (!(gst_segment_clip (&dvdec->segment, GST_FORMAT_TIME,
                  GST_BUFFER_TIMESTAMP (buf),
                  GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf),
                  &cstart, &cstop))))
    goto dropping;

  if (G_UNLIKELY (dv_parse_header (dvdec->decoder, inframe) < 0))
    goto parse_header_error;

  /* get size */
  PAL = dv_system_50_fields (dvdec->decoder);
  wide = dv_format_wide (dvdec->decoder);

  /* check the buffer is of right size after we know if we are
   * dealing with PAL or NTSC */
  length = (PAL ? PAL_BUFFER : NTSC_BUFFER);
  if (G_UNLIKELY (map.size < length))
    goto wrong_size;

  dv_parse_packs (dvdec->decoder, inframe);

  if (dvdec->video_offset % dvdec->drop_factor != 0)
    goto skip;

  /* renegotiate on change */
  if (PAL != dvdec->PAL || wide != dvdec->wide) {
    dvdec->src_negotiated = FALSE;
    dvdec->PAL = PAL;
    dvdec->wide = wide;
  }

  dvdec->height = (dvdec->PAL ? PAL_HEIGHT : NTSC_HEIGHT);

  dvdec->interlaced = !dv_is_progressive (dvdec->decoder);

  /* negotiate if not done yet */
  if (!dvdec->src_negotiated) {
    if (!gst_dvdec_src_negotiate (dvdec))
      goto not_negotiated;
  }

  if (gst_pad_check_reconfigure (dvdec->srcpad)) {
    GstCaps *caps;

    caps = gst_pad_get_current_caps (dvdec->srcpad);
    if (!caps)
      goto not_negotiated;

    gst_dvdec_negotiate_pool (dvdec, caps, &dvdec->vinfo);
    gst_caps_unref (caps);
  }

  if (dvdec->need_segment) {
    gst_pad_push_event (dvdec->srcpad, gst_event_new_segment (&dvdec->segment));
    dvdec->need_segment = FALSE;
  }

  ret = gst_buffer_pool_acquire_buffer (dvdec->pool, &outbuf, NULL);
  if (G_UNLIKELY (ret != GST_FLOW_OK))
    goto no_buffer;

  gst_video_frame_map (&frame, &dvdec->vinfo, outbuf, GST_MAP_WRITE);

  outframe_ptrs[0] = GST_VIDEO_FRAME_COMP_DATA (&frame, 0);
  outframe_pitches[0] = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 0);

  /* the rest only matters for YUY2 */
  if (dvdec->bpp < 3) {
    outframe_ptrs[1] = GST_VIDEO_FRAME_COMP_DATA (&frame, 1);
    outframe_ptrs[2] = GST_VIDEO_FRAME_COMP_DATA (&frame, 2);

    outframe_pitches[1] = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 1);
    outframe_pitches[2] = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 2);
  }

  GST_DEBUG_OBJECT (dvdec, "decoding and pushing buffer");
  dv_decode_full_frame (dvdec->decoder, inframe,
      e_dv_color_yuv, outframe_ptrs, outframe_pitches);

  gst_video_frame_unmap (&frame);

  GST_BUFFER_FLAG_UNSET (outbuf, GST_VIDEO_BUFFER_FLAG_TFF);

  GST_BUFFER_OFFSET (outbuf) = GST_BUFFER_OFFSET (buf);
  GST_BUFFER_OFFSET_END (outbuf) = GST_BUFFER_OFFSET_END (buf);

  /* FIXME : Compute values when using non-TIME segments,
   * but for the moment make sure we at least don't set bogus values
   */
  if (GST_CLOCK_TIME_IS_VALID (cstart)) {
    GST_BUFFER_TIMESTAMP (outbuf) = cstart;
    if (GST_CLOCK_TIME_IS_VALID (cstop))
      GST_BUFFER_DURATION (outbuf) = cstop - cstart;
  }

  ret = gst_pad_push (dvdec->srcpad, outbuf);

skip:
  dvdec->video_offset++;

done:
  gst_buffer_unmap (buf, &map);
  gst_buffer_unref (buf);

  return ret;

  /* ERRORS */
wrong_size:
  {
    GST_ELEMENT_ERROR (dvdec, STREAM, DECODE,
        (NULL), ("Input buffer too small"));
    ret = GST_FLOW_ERROR;
    goto done;
  }
parse_header_error:
  {
    GST_ELEMENT_ERROR (dvdec, STREAM, DECODE,
        (NULL), ("Error parsing DV header"));
    ret = GST_FLOW_ERROR;
    goto done;
  }
not_negotiated:
  {
    GST_DEBUG_OBJECT (dvdec, "could not negotiate output");
    ret = GST_FLOW_NOT_NEGOTIATED;
    goto done;
  }
no_buffer:
  {
    GST_DEBUG_OBJECT (dvdec, "could not allocate buffer");
    goto done;
  }

dropping:
  {
    GST_DEBUG_OBJECT (dvdec,
        "dropping buffer since it's out of the configured segment");
    goto done;
  }
}
int main( int argc, char **argv)
{
    int infile = 0;
    unsigned char dv_buffer[144000];
    unsigned char video_buffer[720 * 576 * 3];
    int16_t *audio_bufs[4];
    dv_decoder_t *decoder = NULL;
    dv_encoder_t *encoder = NULL;
    int pitches[3];
    unsigned char *pixels[3];
    int i = 0, j;
    int isPAL = FALSE;

    pitches[0] = 720 * 2;
    pixels[0] = video_buffer;

    for(i = 0; i < 4; i++) {
        audio_bufs[i] = malloc(DV_AUDIO_MAX_SAMPLES*sizeof(int16_t));
    }

    /* assume NTSC for now, switch to PAL later if needed */
    decoder = dv_decoder_new(FALSE, FALSE, FALSE);
    encoder = dv_encoder_new(FALSE, FALSE, FALSE);

    decoder->quality = DV_QUALITY_BEST;
    encoder->vlc_encode_passes = 3;
    encoder->static_qno = 0;
    encoder->force_dct = DV_DCT_AUTO;

    i = 0;
    while (read_frame(stdin, dv_buffer, &isPAL)) {
        dv_parse_header(decoder, dv_buffer);
        if (isPAL != encoder->isPAL && isPAL == TRUE) {
            decoder->clamp_luma = FALSE;
            decoder->clamp_chroma = FALSE;
            encoder->clamp_luma = FALSE;
            encoder->clamp_chroma = FALSE;
            dv_reconfigure(FALSE, FALSE);
        } else if (isPAL != encoder->isPAL) {
            decoder->clamp_luma = TRUE;
            decoder->clamp_chroma = TRUE;
            decoder->add_ntsc_setup = TRUE;
            encoder->clamp_luma = TRUE;
            encoder->clamp_chroma = TRUE;
            encoder->rem_ntsc_setup = TRUE;
            dv_reconfigure(TRUE, TRUE);
        }
        encoder->isPAL = isPAL;
        encoder->is16x9 = (dv_format_wide(decoder)>0);
        dv_decode_full_audio(decoder, dv_buffer, audio_bufs);
        for (j = 0; j < TIMES; j++) {
            dv_decode_full_frame(decoder, dv_buffer, e_dv_color_yuv,
                                 pixels, pitches);

            dv_encode_full_frame(encoder, pixels, e_dv_color_yuv, dv_buffer);
        }
        dv_encode_full_audio(encoder, audio_bufs, 2, 48000, dv_buffer);
        fwrite(dv_buffer, 1, (isPAL ? 144000 : 120000), stdout);
    }

    close(infile);

    for(i=0; i < 4; i++) free(audio_bufs[i]);
    dv_decoder_free(decoder);
    dv_encoder_free(encoder);

    return 0;
}
Example #11
static int decode(quicktime_t *file, unsigned char **row_pointers, int track)
{
    long bytes;
    quicktime_video_map_t *vtrack = &(file->vtracks[track]);
    quicktime_dv_codec_t *codec = ((quicktime_codec_t*)vtrack->codec)->priv;
    int width = vtrack->track->tkhd.track_width;
    int height = vtrack->track->tkhd.track_height;
    int result = 0;
    int i;
    int decode_colormodel = 0;
    int pitches[3] = { 720 * 2, 0, 0 };


    quicktime_set_video_position(file, vtrack->current_position, track);
    bytes = quicktime_frame_size(file, vtrack->current_position, track);
    result = !quicktime_read_data(file, (char*)codec->data, bytes);

    if( codec->dv_decoder && codec->parameters_changed )
    {
        dv_decoder_free( codec->dv_decoder );
        codec->dv_decoder = NULL;
        codec->parameters_changed = 0;
    }

    if( ! codec->dv_decoder )
    {
        pthread_mutex_lock( &libdv_init_mutex );


        codec->dv_decoder = dv_decoder_new( codec->add_ntsc_setup,
                                            codec->clamp_luma,
                                            codec->clamp_chroma );
        codec->dv_decoder->prev_frame_decoded = 0;

        codec->parameters_changed = 0;
        pthread_mutex_unlock( &libdv_init_mutex );
    }

    if(codec->dv_decoder)
    {
        int is_sequential =
            check_sequentiality( row_pointers,
                                 720 * cmodel_calculate_pixelsize(file->color_model),
                                 file->out_h );

        codec->dv_decoder->quality = codec->decode_quality;

        dv_parse_header( codec->dv_decoder, codec->data );

// Libdv improperly decodes RGB colormodels.
        if((file->color_model == BC_YUV422 ||
                file->color_model == BC_RGB888) &&
                file->in_x == 0 &&
                file->in_y == 0 &&
                file->in_w == width &&
                file->in_h == height &&
                file->out_w == width &&
                file->out_h == height &&
                is_sequential)
        {
            if( file->color_model == BC_YUV422 )
            {
                pitches[0] = 720 * 2;
                dv_decode_full_frame( codec->dv_decoder, codec->data,
                                      e_dv_color_yuv, row_pointers,
                                      pitches );
            }
            else if( file->color_model == BC_RGB888)
            {
                pitches[0] = 720 * 3;
                dv_decode_full_frame( codec->dv_decoder, codec->data,
                                      e_dv_color_rgb, row_pointers,
                                      pitches );
            }
        }
        else
        {
            if(!codec->temp_frame)
            {
                codec->temp_frame = malloc(720 * 576 * 2);
                codec->temp_rows = malloc(sizeof(unsigned char*) * 576);
                for(i = 0; i < 576; i++)
                    codec->temp_rows[i] = codec->temp_frame + 720 * 2 * i;
            }

            decode_colormodel = BC_YUV422;
            pitches[0] = 720 * 2;
            dv_decode_full_frame( codec->dv_decoder, codec->data,
                                  e_dv_color_yuv, codec->temp_rows,
                                  pitches );

            cmodel_transfer(row_pointers,
                            codec->temp_rows,
                            row_pointers[0],
                            row_pointers[1],
                            row_pointers[2],
                            codec->temp_rows[0],
                            codec->temp_rows[1],
                            codec->temp_rows[2],
                            file->in_x,
                            file->in_y,
                            file->in_w,
                            file->in_h,
                            0,
                            0,
                            file->out_w,
                            file->out_h,
                            decode_colormodel,
                            file->color_model,
                            0,
                            width,
                            file->out_w);
        }
    }

//printf(__FUNCTION__ " 2\n");
    return result;
}
Example #12
dv_t	*rawdv_open_input_file(const char *filename, int mmap_size)
{
	dv_t *dv = (dv_t*) vj_malloc(sizeof(dv_t));
	if(!dv) return NULL;
	memset(dv, 0, sizeof(dv_t));
	dv_decoder_t *decoder = NULL;

	uint8_t *tmp = (uint8_t*) vj_malloc(sizeof(uint8_t) * DV_HEADER_SIZE);
	memset( tmp, 0, sizeof(uint8_t) * DV_HEADER_SIZE);
	off_t file_size = 0;
	int n = 0;

	decoder = dv_decoder_new( 1,0,0);
	dv->fd = open( filename, O_RDONLY );
	
	if(dv->fd < 0)
	{
		dv_decoder_free(decoder); 
		rawdv_free(dv);
		veejay_msg(VEEJAY_MSG_ERROR, "Unable to open file '%s'",filename);
		if(tmp)free(tmp);
		return NULL;
	}
	/* fseek sometimes lies about filesize - seek to end (lseek returns file offset from start)*/
	file_size = lseek( dv->fd, 0, SEEK_END );
	if( file_size < DV_HEADER_SIZE)
	{
		dv_decoder_free(decoder);
		veejay_msg(VEEJAY_MSG_ERROR, "File %s is not a DV file", filename);
		rawdv_free(dv);
		if(tmp) free(tmp);
		return NULL;
	}
	/* And back to start offset */
	if( lseek(dv->fd,0, SEEK_SET ) < 0)
	{
		dv_decoder_free(decoder);
		veejay_msg(VEEJAY_MSG_ERROR, "Seek error in %s", filename);
		rawdv_free(dv);
		if(tmp) free(tmp);
		return NULL;
	}

	dv->mmap_region = NULL;
	if( mmap_size > 0 ) // user wants mmap
	{
		dv->mmap_region = mmap_file( dv->fd, 0, (mmap_size * 720 * 576 * 3),
			file_size );
	}

	if( dv->mmap_region == NULL )
	{
		if(mmap_size>0)
			veejay_msg(VEEJAY_MSG_DEBUG, "Mmap of DV file failed - fallback to read");
		n = read( dv->fd, tmp, DV_HEADER_SIZE );
	}
	else
	{
		n = mmap_read( dv->mmap_region, 0, DV_HEADER_SIZE, tmp );
	}

	if( n <= 0 )
	{
		dv_decoder_free(decoder);
		rawdv_free(dv);
		if(tmp) free(tmp);
		veejay_msg(VEEJAY_MSG_ERROR, "Cannot read from '%s'", filename);
		return NULL;
	}

	if( dv_parse_header( decoder, tmp) < 0 )
	{
		dv_decoder_free( decoder );
		rawdv_free(dv);
		if(tmp) free(tmp);
		veejay_msg(VEEJAY_MSG_ERROR, "Cannot parse header of file %s", filename);
		return NULL;
	}
/*	if(decoder->sampling == e_dv_sample_411)
	{
		dv_decoder_free( decoder );
		rawdv_free(dv);
		if(tmp) free(tmp);
		return NULL;
	}*/


	if(dv_is_PAL( decoder ) )
		dv->chunk_size = DV_PAL_SIZE;
	else
		dv->chunk_size = DV_NTSC_SIZE;

	dv->width = decoder->width;
	dv->height = decoder->height;
	dv->audio_rate = decoder->audio->frequency;
	dv->audio_chans = decoder->audio->num_channels;
	dv->audio_qbytes = decoder->audio->quantization;
	dv->fps = ( dv_is_PAL( decoder) ? 25.0 : 29.97 );
	dv->size = decoder->frame_size;
	dv->num_frames = (file_size - DV_HEADER_SIZE) / dv->size;
	dv->fmt = decoder->sampling;	
//	dv->fmt = ( decoder->sampling == e_dv_sample_422 ? 1 : 0);
	dv->buf = (uint8_t*) vj_malloc(sizeof(uint8_t) * dv->size);
	dv->offset = 0;

	veejay_msg(VEEJAY_MSG_DEBUG,
			"DV properties %d x %d, %f, %d frames, %d sampling",
			dv->width,dv->height, dv->fps, dv->num_frames,
			dv->fmt );
	
	dv_decoder_free( decoder );

	if(tmp)
		free(tmp);

/*	if(dv->audio_rate)
	{
		int i;
		for( i = 0; i < 4; i ++ )
		dv->audio_buffers[i] = (int16_t*) vj_malloc(sizeof(int16_t) * 2 * DV_AUDIO_MAX_SAMPLES);
	}*/

/*
	veejay_msg(VEEJAY_MSG_DEBUG,
		"rawDV: num frames %ld, dimensions %d x %d, at %2.2f in %s",
		dv->num_frames,
		dv->width,
		dv->height,
		dv->fps,
		(dv->fmt==1?"422":"420"));
	veejay_msg(VEEJAY_MSG_DEBUG,
		"rawDV: frame size %d, rate %d, channels %d, bits %d",
		dv->size,
		dv->audio_rate,
		dv->audio_chans,
		dv->audio_qbytes);*/

	return dv;
}
Example #13
void DVFrame::ExtractHeader( void )
{
	dv_parse_header( decoder, data );
	dv_parse_packs( decoder, data );
}
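
Example #13 only parses; turning the frame into pixels takes one more call. Below is a minimal sketch of decoding a single frame to packed 4:2:2 (YUY2), using the same width * 2 pitch seen in Examples #7 and #11. The helper is hypothetical and assumes the caller provides a yuy2 buffer of at least decoder->width * decoder->height * 2 bytes.

/* Decode one DV frame into a caller-provided packed YUY2 buffer. */
static void decode_dv_frame_yuy2(dv_decoder_t *decoder,
		unsigned char *frame,
		unsigned char *yuy2)
{
	unsigned char *pixels[3] = { yuy2, NULL, NULL };
	int pitches[3] = { 0, 0, 0 };

	pitches[0] = decoder->width * 2;   /* two bytes per pixel in packed 4:2:2 */

	dv_parse_header(decoder, frame);
	dv_decode_full_frame(decoder, frame, e_dv_color_yuv, pixels, pitches);
}

As in the examples, one decoder can be reused across frames; only dv_parse_header() has to be run on each frame before decoding it.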
Example #14
void writeoutYUV4MPEGheader(int out_fd,
			    LavParam *param,
			    EditList el,
			    y4m_stream_info_t *streaminfo)
{
   int n;

   y4m_si_set_width(streaminfo, param->output_width);
   y4m_si_set_height(streaminfo, param->output_height);
   y4m_si_set_interlace(streaminfo, param->interlace);
   y4m_si_set_framerate(streaminfo, mpeg_conform_framerate(el.video_fps));
   if (!Y4M_RATIO_EQL(param->sar, y4m_sar_UNKNOWN)) {
     y4m_si_set_sampleaspect(streaminfo, param->sar);
   } else if ((el.video_sar_width != 0) || (el.video_sar_height != 0)) {
     y4m_ratio_t sar;
     sar.n = el.video_sar_width;
     sar.d = el.video_sar_height;
     y4m_si_set_sampleaspect(streaminfo, sar);
   } else {
     /* no idea! ...eh, just guess. */
     mjpeg_warn("unspecified sample-aspect-ratio --- taking a guess...");
     y4m_si_set_sampleaspect(streaminfo,
			     y4m_guess_sar(param->output_width, 
					   param->output_height,
					   param->dar));
   }

   switch (el_video_frame_data_format(0, &el)) { /* FIXME: checking only 0-th frame. */
   case DATAFORMAT_YUV420:
     switch (param->chroma) {
     case Y4M_UNKNOWN:
     case Y4M_CHROMA_420JPEG:
       break;
     case Y4M_CHROMA_420MPEG2:
     case Y4M_CHROMA_420PALDV:
       mjpeg_warn("4:2:0 chroma should be '420jpeg' with this input");
       break;
     default:
       mjpeg_error_exit1("must specify 4:2:0 chroma (should be '420jpeg') with this input");
       break;
     }
     break;

   case DATAFORMAT_YUV422:
     switch (param->chroma) {
     case Y4M_CHROMA_422:
       break;
     default:
       mjpeg_error_exit1("must specify chroma '422' with this input");
       break;
     }
     break;

   case DATAFORMAT_DV2:
#ifndef HAVE_LIBDV
     mjpeg_error_exit1("DV input was not configured at compile time");
#else
     el_get_video_frame(jpeg_data, 0, &el); /* FIXME: checking only 0-th frame. */
     dv_parse_header(decoder, jpeg_data);
     switch(decoder->sampling) {
     case e_dv_sample_420:
       switch (param->chroma) {
       case Y4M_UNKNOWN:
	 mjpeg_info("set chroma '420paldv' from input");
	 param->chroma = Y4M_CHROMA_420PALDV;
	 break;
       case Y4M_CHROMA_420PALDV:
	 break;
       case Y4M_CHROMA_420JPEG:
       case Y4M_CHROMA_420MPEG2:
	 mjpeg_warn("4:2:0 chroma should be '420paldv' with this input");
	 break;
       case Y4M_CHROMA_422:
         if(libdv_pal_yv12 == 1 )
	   mjpeg_error_exit1("must specify 4:2:0 chroma (should be '420paldv') with this input");
	 break;
       default:
	 mjpeg_error_exit1("must specify 4:2:0 chroma (should be '420paldv') with this input");
	 break;
       }
       break;
     case e_dv_sample_411:
       if (param->chroma != Y4M_CHROMA_411)
	 mjpeg_info("chroma '411' recommended with this input");
       switch (param->chroma) {
       case Y4M_CHROMA_420MPEG2:
       case Y4M_CHROMA_420PALDV:
	 mjpeg_warn("4:2:0 chroma should be '420jpeg' with this input");
	 break;
       }
       break;
     case e_dv_sample_422:
       if (param->chroma != Y4M_CHROMA_422)
	 mjpeg_info("chroma '422' recommended with this input");
       switch (param->chroma) {
       case Y4M_CHROMA_420MPEG2:
       case Y4M_CHROMA_420PALDV:
	 mjpeg_warn("4:2:0 chroma should be '420jpeg' with this input");
	 break;
       }
       break;
     default:
       break;
     }
#endif
     break;

   case DATAFORMAT_MJPG:
     if (param->chroma != Y4M_CHROMA_422 && el.chroma == Y4M_CHROMA_422)
       mjpeg_info("chroma '422' recommended with this input");
     switch (param->chroma) {
     case Y4M_CHROMA_420MPEG2:
     case Y4M_CHROMA_420PALDV:
       mjpeg_warn("4:2:0 chroma should be '420jpeg' with this input");
       break;
     }
     break;
   }
   if (param->chroma == Y4M_UNKNOWN) {
     mjpeg_info("set default chroma '420jpeg'");
     param->chroma = Y4M_CHROMA_420JPEG;
   }
   y4m_si_set_chroma(streaminfo, param->chroma);

   n = y4m_write_stream_header(out_fd, streaminfo);
   if (n != Y4M_OK)
      mjpeg_error("Failed to write stream header: %s", y4m_strerr(n));
}
Example #15
/*
 * readframe - read jpeg or dv frame into yuv buffer
 *
 * returns:
 *	0   success
 *	1   fatal error
 *	2   corrupt data encountered; 
 *		decoding can continue, but this frame may be damaged 
 */
int readframe(int numframe, 
	      uint8_t *frame[],
	      LavParam *param,
	      EditList el)
{
  int len, i, res, data_format;
  uint8_t *frame_tmp;
  int warn;
  warn = 0;

  if (MAX_JPEG_LEN < el.max_frame_size) {
    mjpeg_error_exit1( "Max size of JPEG frame = %ld: too big",
		       el.max_frame_size);
  }
  
  len = el_get_video_frame(jpeg_data, numframe, &el);
  data_format = el_video_frame_data_format(numframe, &el);
  
  switch(data_format) {

  case DATAFORMAT_DV2 :
#ifndef HAVE_LIBDV
    mjpeg_error("DV input was not configured at compile time");
    res = 1;
#else
    mjpeg_debug("DV frame %d   len %d",numframe,len);
    res = 0;
    dv_parse_header(decoder, jpeg_data);
    switch(decoder->sampling) {
    case e_dv_sample_420:
      /* libdv decodes PAL DV directly as planar YUV 420
       * (YV12 or 4CC 0x32315659) if configured with the flag
       * --with-pal-yuv=YV12 which is not (!) the default
       */
      if (libdv_pal_yv12 == 1) {
	pitches[0] = decoder->width;
	pitches[1] = decoder->width / 2;
	pitches[2] = decoder->width / 2;
	if (pitches[0] != param->output_width ||
	    pitches[1] != param->chroma_width) {
	  mjpeg_error("for DV 4:2:0 only full width output is supported");
	  res = 1;
	} else {
	  dv_decode_full_frame(decoder, jpeg_data, e_dv_color_yuv,
			       frame, (int *)pitches);
	  /* swap the U and V components */
	  frame_tmp = frame[2];
	  frame[2] = frame[1];
	  frame[1] = frame_tmp;
	}
	break;
      }
    case e_dv_sample_411:
    case e_dv_sample_422:
      /* libdv decodes NTSC DV (native 411) and by default also PAL
       * DV (native 420) as packed YUV 422 (YUY2 or 4CC 0x32595559)
       * where the U and V information is repeated.  This can be
       * transformed to planar 420 (YV12 or 4CC 0x32315659).
       * For NTSC DV this transformation is lossy.
       */
      pitches[0] = decoder->width * 2;
      pitches[1] = 0;
      pitches[2] = 0;
      if (decoder->width != param->output_width) {
	mjpeg_error("for DV only full width output is supported");
	res = 1;
      } else {
	dv_decode_full_frame(decoder, jpeg_data, e_dv_color_yuv,
			     dv_frame, (int *)pitches);
	frame_YUV422_to_planar(frame, dv_frame[0],
			       decoder->width,	decoder->height,
			       param->chroma);
      }
      break;
    default:
      res = 1;
      break;
    }
#endif /* HAVE_LIBDV */
    break;

  case DATAFORMAT_YUV420 :
  case DATAFORMAT_YUV422 :
    mjpeg_debug("raw YUV frame %d   len %d",numframe,len);
    frame_tmp = jpeg_data;
    memcpy(frame[0], frame_tmp, param->luma_size);
    frame_tmp += param->luma_size;
    memcpy(frame[1], frame_tmp, param->chroma_size);
    frame_tmp += param->chroma_size;
    memcpy(frame[2], frame_tmp, param->chroma_size);
    res = 0;
    break;

  default:
    mjpeg_debug("MJPEG frame %d   len %d",numframe,len);
    res = decode_jpeg_raw(jpeg_data, len, el.video_inter,
			  param->chroma,
			  param->output_width, param->output_height,
			  frame[0], frame[1], frame[2]);
  }
  
  if (res < 0) {
    mjpeg_warn( "Fatal Error Decoding Frame %d", numframe);
    return 1;
  } else if (res == 1) {
    mjpeg_warn( "Decoding of Frame %d failed", numframe);
    warn = 1;
    res = 0;
  }
  
  
  if (param->mono) {
    for (i = 0;
	 i < param->chroma_size;
	 ++i) {
      frame[1][i] = 0x80;
      frame[2][i] = 0x80;
    }
  }

  if(warn)
	  return 2;
  else
	  return 0;
}
Example #16
static GstFlowReturn
gst_dvdec_chain (GstPad * pad, GstBuffer * buf)
{
  GstDVDec *dvdec;
  guint8 *inframe;
  guint8 *outframe;
  guint8 *outframe_ptrs[3];
  gint outframe_pitches[3];
  GstBuffer *outbuf;
  GstFlowReturn ret = GST_FLOW_OK;
  guint length;
  gint64 cstart, cstop;
  gboolean PAL, wide;

  dvdec = GST_DVDEC (gst_pad_get_parent (pad));
  inframe = GST_BUFFER_DATA (buf);

  /* buffer should be at least the size of one NTSC frame, this should
   * be enough to decode the header. */
  if (G_UNLIKELY (GST_BUFFER_SIZE (buf) < NTSC_BUFFER))
    goto wrong_size;

  /* preliminary dropping. unref and return if outside of configured segment */
  if ((dvdec->segment->format == GST_FORMAT_TIME) &&
      (!(gst_segment_clip (dvdec->segment, GST_FORMAT_TIME,
                  GST_BUFFER_TIMESTAMP (buf),
                  GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf),
                  &cstart, &cstop))))
    goto dropping;

  if (G_UNLIKELY (dv_parse_header (dvdec->decoder, inframe) < 0))
    goto parse_header_error;

  /* get size */
  PAL = dv_system_50_fields (dvdec->decoder);
  wide = dv_format_wide (dvdec->decoder);

  /* check the buffer is of right size after we know if we are
   * dealing with PAL or NTSC */
  length = (PAL ? PAL_BUFFER : NTSC_BUFFER);
  if (G_UNLIKELY (GST_BUFFER_SIZE (buf) < length))
    goto wrong_size;

  dv_parse_packs (dvdec->decoder, inframe);

  if (dvdec->video_offset % dvdec->drop_factor != 0)
    goto skip;

  /* renegotiate on change */
  if (PAL != dvdec->PAL || wide != dvdec->wide) {
    dvdec->src_negotiated = FALSE;
    dvdec->PAL = PAL;
    dvdec->wide = wide;
  }

  dvdec->height = (dvdec->PAL ? PAL_HEIGHT : NTSC_HEIGHT);


  /* negotiate if not done yet */
  if (!dvdec->src_negotiated) {
    if (!gst_dvdec_src_negotiate (dvdec))
      goto not_negotiated;
  }

  ret =
      gst_pad_alloc_buffer_and_set_caps (dvdec->srcpad, 0,
      (720 * dvdec->height) * dvdec->bpp,
      GST_PAD_CAPS (dvdec->srcpad), &outbuf);
  if (G_UNLIKELY (ret != GST_FLOW_OK))
    goto no_buffer;

  outframe = GST_BUFFER_DATA (outbuf);

  outframe_ptrs[0] = outframe;
  outframe_pitches[0] = 720 * dvdec->bpp;

  /* the rest only matters for YUY2 */
  if (dvdec->bpp < 3) {
    outframe_ptrs[1] = outframe_ptrs[0] + 720 * dvdec->height;
    outframe_ptrs[2] = outframe_ptrs[1] + 360 * dvdec->height;

    outframe_pitches[1] = dvdec->height / 2;
    outframe_pitches[2] = outframe_pitches[1];
  }

  GST_DEBUG_OBJECT (dvdec, "decoding and pushing buffer");
  dv_decode_full_frame (dvdec->decoder, inframe,
      e_dv_color_yuv, outframe_ptrs, outframe_pitches);

  GST_BUFFER_OFFSET (outbuf) = GST_BUFFER_OFFSET (buf);
  GST_BUFFER_OFFSET_END (outbuf) = GST_BUFFER_OFFSET_END (buf);
  GST_BUFFER_TIMESTAMP (outbuf) = cstart;
  GST_BUFFER_DURATION (outbuf) = cstop - cstart;

  ret = gst_pad_push (dvdec->srcpad, outbuf);

skip:
  dvdec->video_offset++;

done:
  gst_buffer_unref (buf);
  gst_object_unref (dvdec);

  return ret;

  /* ERRORS */
wrong_size:
  {
    GST_ELEMENT_ERROR (dvdec, STREAM, DECODE,
        (NULL), ("Input buffer too small"));
    ret = GST_FLOW_ERROR;
    goto done;
  }
parse_header_error:
  {
    GST_ELEMENT_ERROR (dvdec, STREAM, DECODE,
        (NULL), ("Error parsing DV header"));
    ret = GST_FLOW_ERROR;
    goto done;
  }
not_negotiated:
  {
    GST_DEBUG_OBJECT (dvdec, "could not negotiate output");
    ret = GST_FLOW_NOT_NEGOTIATED;
    goto done;
  }
no_buffer:
  {
    GST_DEBUG_OBJECT (dvdec, "could not allocate buffer");
    goto done;
  }

dropping:
  {
    GST_DEBUG_OBJECT (dvdec,
        "dropping buffer since it's out of the configured segment");
    goto done;
  }
}