Esempio n. 1
0
/*
 * Verify the AltiVec sub_pred() against the reference implementation:
 * run both on the same input, compare checksums of the resulting 8x8
 * difference block, and log every differing element on mismatch.
 */
void sub_pred_altivec_verify(SUB_PRED_PDECL)
{
    int i;
    unsigned long checksum1, checksum2;
    signed short blkcpy[8*8];

    sub_pred_altivec(SUB_PRED_ARGS);
    for (checksum1 = i = 0; i < 8*8; i++)
	checksum1 += abs(blk[i]);

    /* save the AltiVec result so differing elements can be reported */
    memcpy(blkcpy, blk, 8*8*sizeof(short));

    ALTIVEC_TEST_WITH(sub_pred)(SUB_PRED_ARGS);
    for (checksum2 = i = 0; i < 8*8; i++)
	checksum2 += abs(blk[i]);

    if (checksum1 != checksum2) {
	mjpeg_debug("sub_pred(" SUB_PRED_PFMT ")", SUB_PRED_ARGS);
	/* checksums are unsigned long: %lu, not %d (was undefined behavior
	   on platforms where long is wider than int) */
	mjpeg_debug("sub_pred: checksums differ %lu != %lu",
	    checksum1, checksum2);

	for (i = 0; i < 8*8; i++) {
	    if (blkcpy[i] != blk[i]) {
		mjpeg_debug("sub_pred: blk[%d] %d != %d",
		    i, blkcpy[i], blk[i]);
	    }
	}
    }
}
Esempio n. 2
0
// this method isn't too effective
int search_video_1 (int m, int s, int line, uint8_t *yuv_data[3],y4m_stream_info_t *sinfo)
{

    int w,h;
    int x1,x2;
    int min,shift,tot;
    int linew, line1w;

    int ilace = y4m_si_get_interlace(sinfo);

    w = y4m_si_get_plane_width(sinfo,0);
    h = y4m_si_get_plane_height(sinfo,0);

    linew = line * w;

    if (ilace == Y4M_ILACE_NONE)
        line1w = (line+1) * w ;
    else
        line1w = (line+2) * w;

    line1w = (line+2) * w;


    mjpeg_debug("search_video %d",line);

    // 2 or 1 dependent on interlace or not.
    if (line+2 > h) {
        mjpeg_warn("line > height");
        return 0;
    }

    shift = 0;
    for (x1=-m; x1<m; x1++)
    {
        tot = 0;
        for(x2=0; x2<s; x2++)
        {
            // don't know if I should apply a standard addition to pixels outside the box.
            if (x1+x2 >=0 && x1+x2 < w)
                tot += abs ( *(yuv_data[0]+x1+x2+linew) - *(yuv_data[0]+x2+line1w));
            else
                tot += 128;
        }

        // ok it wasn't max afterall, it was min.
        if (x1==0) min = tot;
        if (tot < min) {
            min = tot;
            shift = x1;
        }
    }

    mjpeg_debug("exit search_video %d",line);

    return shift;
}
Esempio n. 3
0
/// Scan ahead in the LPCM elementary stream, appending one access unit
/// (= one fixed-size audio frame) per iteration, until frames_to_buffer
/// additional frames are buffered, the bitstream hits EOF, or the
/// multiplexor's maximum PTS is exceeded.
/// @param frames_to_buffer number of frames to read ahead
void LPCMStream::FillAUbuffer(unsigned int frames_to_buffer )
{
	last_buffered_AU += frames_to_buffer;
	mjpeg_debug( "Scanning %d MPEG LPCM audio frames to frame %d", 
				 frames_to_buffer, last_buffered_AU );

	while ( !bs.eos() 
            && decoding_order < last_buffered_AU 
            && !muxinto.AfterMaxPTS(access_unit.PTS) )
	{
		int skip=access_unit.length; 
        bs.SeekFwdBits( skip );
		prev_offset = AU_start;
		AU_start = bs.bitcount();
        /* If the forward seek advanced fewer bits than one full frame,
           the final frame is truncated: drop it and stop scanning. */
        if( AU_start - prev_offset != access_unit.length*8 )
        {
            mjpeg_warn("Discarding incomplete final frame LPCM  stream %d",
                       stream_num);
            aunits.DropLast();
            --decoding_order;
            break;
        }

        // Here we would check for header data but LPCM has no headers...
        if( bs.eos()   )
            break;

		access_unit.start = AU_start;
		access_unit.length = bytes_per_frame;
		/* LPCM frames have fixed duration, so PTS is simply the frame
		   index times the per-frame tick count; DTS == PTS (no reordering). */
		access_unit.PTS = static_cast<clockticks>(decoding_order) * 
            (CLOCKS_per_90Kth_sec * ticks_per_frame_90kHz);
		access_unit.DTS = access_unit.PTS;
		access_unit.dorder = decoding_order;
		decoding_order++;
		aunits.Append( access_unit );
		num_frames++;
		
		num_syncword++;

		/* progress logging only every 10 frames */
		if (num_syncword >= old_frames+10 )
		{
			mjpeg_debug ("Got %d frame headers.", num_syncword);
			old_frames=num_syncword;
		}
        mjpeg_debug( "Got frame %d\n", decoding_order );

    }
	last_buffered_AU = decoding_order;
	eoscan =  bs.eos() || muxinto.AfterMaxPTS(access_unit.PTS);
}
Esempio n. 4
0
/*
 * Allocate the three per-channel planes used for PPM reading.  Each plane
 * is width*height*2 bytes (double height leaves room for both fields when
 * deinterleaving).
 */
static
void alloc_buffers(uint8_t *buffers[], int width, int height)
{
  mjpeg_debug("Alloc'ing buffers");
  buffers[0] = malloc(width * height * 2 * sizeof(buffers[0][0]));
  buffers[1] = malloc(width * height * 2 * sizeof(buffers[1][0]));
  buffers[2] = malloc(width * height * 2 * sizeof(buffers[2][0]));
  /* The results were previously unchecked; fail hard on exhaustion, as
     the other allocation sites in this tool do. */
  if (buffers[0] == NULL || buffers[1] == NULL || buffers[2] == NULL)
    mjpeg_error_exit1("alloc_buffers: malloc failed");
}
Esempio n. 5
0
/*
 * Verify the AltiVec subsample_image() against the reference version:
 * run both over the same source image, compare checksums of the 2*2 and
 * 4*4 subsampled planes, and dump differing pixels on mismatch.
 */
void subsample_image_altivec_verify(SUBSAMPLE_IMAGE_PDECL)
{
    int width, height;
    unsigned long checksum44_1, checksum44_2;
    unsigned long checksum22_1, checksum22_2;
    unsigned char *cpy22, *cpy44;

    /* Geometry is recovered from the buffer layout: rows are rowstride
       bytes apart and the 2*2 plane starts right after the full image. */
    width = rowstride;
    height = (unsigned long)(sub22_image - image) / rowstride;

    cpy22 = (unsigned char*)malloc((width/2) * (height/2));
    cpy44 = (unsigned char*)malloc((width/4) * (height/4));
    if (cpy22 == NULL || cpy44 == NULL)
	mjpeg_error_exit1("subsample_image: malloc failed");

    subsample_image_altivec(SUBSAMPLE_IMAGE_ARGS);
    checksum22_1 = checksum(sub22_image, width/2, height/2, rowstride/2);
    checksum44_1 = checksum(sub44_image, width/4, height/4, rowstride/4);

    /* copy data for imgcmp */
    imgcpy(cpy22, sub22_image, width/2, height/2, rowstride/2);
    imgcpy(cpy44, sub44_image, width/4, height/4, rowstride/4);

    /* re-run with the reference implementation over the same source */
    ALTIVEC_TEST_WITH(subsample_image)(SUBSAMPLE_IMAGE_ARGS);
    checksum22_2 = checksum(sub22_image, width/2, height/2, rowstride/2);
    checksum44_2 = checksum(sub44_image, width/4, height/4, rowstride/4);

    if (checksum22_1 != checksum22_2 || checksum44_1 != checksum44_2) {
	mjpeg_debug("subsample_image(" SUBSAMPLE_IMAGE_PFMT ")",
	    SUBSAMPLE_IMAGE_ARGS);
	if (checksum22_1 != checksum22_2)
	    mjpeg_debug("subsample_image: %s checksums differ %d != %d",
		"2*2", checksum22_1, checksum22_2);
	if (checksum44_1 != checksum44_2)
	    mjpeg_debug("subsample_image: %s checksums differ %d != %d",
		"4*4", checksum44_1, checksum44_2);

	imgcmp("2*2", cpy22, sub22_image, width/2, height/2, rowstride/2);
	imgcmp("4*4", cpy44, sub44_image, width/4, height/4, rowstride/4);
    }

    free(cpy22);
    free(cpy44);
}
Esempio n. 6
0
/*
 * Verify the AltiVec add_pred() against the reference version: run both,
 * compare checksums of the resulting 8x8 block at 'cur' (rows spaced 'lx'
 * bytes apart), and dump differing pixels on mismatch.
 */
void add_pred_altivec_verify(ADD_PRED_PDECL)
{
    int i, j;
    unsigned long checksum1, checksum2;
    uint8_t *pcur;
    uint8_t curcpy[8][8];

    add_pred_altivec(ADD_PRED_ARGS);
    pcur = cur;
    checksum1 = 0;
    /* checksum the AltiVec result and keep a copy for later comparison */
    for (j = 0; j < 8; j++) {
	for (i = 0; i < 8; i++) {
	    checksum1 += pcur[i];
	    curcpy[j][i] = pcur[i];
	}
	pcur += lx;
    }

    ALTIVEC_TEST_WITH(add_pred)(ADD_PRED_ARGS);
    pcur = cur;
    checksum2 = 0;
    for (j = 0; j < 8; j++) {
	for (i = 0; i < 8; i++)
	    checksum2 += pcur[i];
	pcur += lx;
    }

    if (checksum1 != checksum2) {
	mjpeg_debug("add_pred(" ADD_PRED_PFMT ")", ADD_PRED_ARGS);
	mjpeg_debug("add_pred: checksums differ %d != %d",
	    checksum1, checksum2);

	/* report every element that differs between the two runs */
	pcur = cur;
	checksum1 = 0;
	for (j = 0; j < 8; j++) {
	    for (i = 0; i < 8; i++) {
		if (curcpy[j][i] != pcur[i])
		    mjpeg_debug("add_pred: cur[%d][%d] %d != %d",
			j, i, curcpy[j][i], pcur[i]);
	    }
	    pcur += lx;
	}
    }
}
Esempio n. 7
0
/*
 * Ensure the decompressor has Huffman tables.  JPEG streams may omit
 * them entirely; when no DC or AC table slot is populated, substitute
 * the standard tables for this frame.
 */
static void guarantee_huff_tables(j_decompress_ptr dinfo)
{
    int have_tables =
	(dinfo->dc_huff_tbl_ptrs[0] != NULL) ||
	(dinfo->dc_huff_tbl_ptrs[1] != NULL) ||
	(dinfo->ac_huff_tbl_ptrs[0] != NULL) ||
	(dinfo->ac_huff_tbl_ptrs[1] != NULL);

    if (!have_tables) {
	mjpeg_debug("Generating standard Huffman tables for this frame.");
	std_huff_tables(dinfo);
    }
}
Esempio n. 8
0
/*
 * Verify one iquant_intra implementation against another: run 'test',
 * restore the (possibly in-place overwritten) source, run 'verify', and
 * compare checksums and individual elements of the two outputs.
 *
 * NOTE(review): unlike the other *_verify helpers in this source, the
 * element-by-element dump below runs even when the checksums match —
 * it still only logs on actual element differences, so output is the
 * same; confirm this is intentional.
 */
static void iquant_intra_altivec_verify(IQUANT_INTRA_PDECL,
    void (*test)(IQUANT_INTRA_PDECL),
    void (*verify)(IQUANT_INTRA_PDECL))
{
    int i;
    unsigned long checksum1, checksum2;
    int16_t srccpy[64], dstcpy[64];
    uint16_t *qmat;

    qmat = (uint16_t*) wsp->intra_q_mat;

    /* in case src == dst */
    memcpy(srccpy, src, 64*sizeof(int16_t));

    (*test)(IQUANT_INTRA_ARGS);
    for (checksum1 = i = 0; i < 64; i++)
	checksum1 += dst[i];

    /* keep the first result so individual elements can be compared */
    memcpy(dstcpy, dst, 64*sizeof(int16_t));

    /* restore the source before the second run */
    memcpy(src, srccpy, 64*sizeof(int16_t));

    (*verify)(IQUANT_INTRA_ARGS);
    for (checksum2 = i = 0; i < 64; i++)
	checksum2 += dst[i];

    if (checksum1 != checksum2) {
	mjpeg_debug("iquant_intra(" IQUANT_INTRA_PFMT ")",
	    IQUANT_INTRA_ARGS);
	mjpeg_debug("iquant_intra: checksums differ %d != %d",
		    checksum1, checksum2);
    }

    for (i = 0; i < 64; i++) {
	if (dstcpy[i] != dst[i]) {
	    mjpeg_debug("iquant_intra: src[%d]=%d, qmat=%d, "
			"dst[%d]=%d != %d", i, srccpy[i], qmat[i]*mquant,
			i, dstcpy[i], dst[i]);
	}
    }
}
Esempio n. 9
0
/*
 * Read an interleaved-field PPM image and deinterleave it: even source
 * rows go to 'buffers', odd rows to 'buffers2', each split into planar
 * R/G/B channels.  'bgr' selects the byte order of the source rows;
 * 'rowbuffer' is scratch space for one row of width*3 bytes.
 */
static
void read_ppm_into_two_buffers(int fd,
			       uint8_t *buffers[], 
			       uint8_t *buffers2[], 
			       uint8_t *rowbuffer,
			       int width, int height, int bgr)
{
  int x, y;
  uint8_t *src;
  /* destination channel pointers in source byte order (1st, 2nd, 3rd) */
  uint8_t *c0 = bgr ? buffers[2] : buffers[0];
  uint8_t *c1 = buffers[1];
  uint8_t *c2 = bgr ? buffers[0] : buffers[2];
  uint8_t *d0 = bgr ? buffers2[2] : buffers2[0];
  uint8_t *d1 = buffers2[1];
  uint8_t *d2 = bgr ? buffers2[0] : buffers2[2];

  mjpeg_debug("read into two buffers, %dx%d", width, height);
  height /= 2;   /* one iteration consumes a row of each field */
  for (y = 0; y < height; y++) {
    if (y4m_read(fd, rowbuffer, width * 3))
      mjpeg_error_exit1("read error A  y=%d", y);
    for (src = rowbuffer, x = 0; x < width; x++) {
      *(c0++) = *(src++);
      *(c1++) = *(src++);
      *(c2++) = *(src++);
    }
    if (y4m_read(fd, rowbuffer, width * 3))
      mjpeg_error_exit1("read error B  y=%d", y);
    for (src = rowbuffer, x = 0; x < width; x++) {
      *(d0++) = *(src++);
      *(d1++) = *(src++);
      *(d2++) = *(src++);
    }
  }
}
Esempio n. 10
0
/*
 * Compare a densely-packed copy 'a' (row length == width) against image
 * 'b' (rows 'stride' bytes apart) and log every differing pixel.  'ss'
 * names the pass ("2*2" / "4*4") in the log messages.
 */
static void imgcmp(const char *ss, uint8_t *a, uint8_t *b,
    int width, int height, int stride)
{
    int row, col;

    for (row = 0; row < height; row++, a += width, b += stride) {
	for (col = 0; col < width; col++) {
	    if (a[col] == b[col])
		continue;
	    mjpeg_debug("subsample_image: %s[%d][%d] %d != %d",
		ss, row, col, a[col], b[col]);
	}
    }
}
Esempio n. 11
0
/** init_parse_files
 * Verifies the PNG input files and prepares YUV4MPEG header information.
 * Decodes the first frame to probe width/height, logs the stream
 * parameters, and validates the interlace/interleave option combination.
 * @param param tool parameters; width/height may be updated in place
 * @returns 0 on success (exits the process on any error)
 */
static int init_parse_files(parameters_t *param)
{ 
  char pngname[255];

  /* build the name of the first frame from the printf-style pattern */
  snprintf(pngname, sizeof(pngname), 
	   param->pngformatstr, param->begin);
  mjpeg_debug("Analyzing %s to get the right pic params", pngname);
  
  if (decode_png(pngname, 0, param) == -1)
    mjpeg_error_exit1("Reading of %s failed.\n", pngname);

  mjpeg_info("Image dimensions are %ux%u",
	     param->width, param->height);
  
  mjpeg_info("Movie frame rate is:  %f frames/second",
	     Y4M_RATIO_DBL(param->framerate));

  switch (param->interlace) 
    {
    case Y4M_ILACE_NONE:
      mjpeg_info("Non-interlaced/progressive frames.");
      break;
    case Y4M_ILACE_BOTTOM_FIRST:
      mjpeg_info("Interlaced frames, bottom field first.");      
      break;
    case Y4M_ILACE_TOP_FIRST:
      mjpeg_info("Interlaced frames, top field first.");      
      break;
    default:
      mjpeg_error_exit1("Interlace has not been specified (use -I option)");
      break;
    }

  /* interlaced input requires the interleave mode to be stated explicitly */
  if ((param->interlace != Y4M_ILACE_NONE) && (param->interleave == -1))
    mjpeg_error_exit1("Interleave has not been specified (use -L option)");

  /* non-interleaved fields: each PNG holds one field, so the full frame
     is twice as tall as a single input image */
  if (!(param->interleave) && (param->interlace != Y4M_ILACE_NONE)) 
    {
      param->height *= 2;
      mjpeg_info("Non-interleaved fields (image height doubled)");
    }
  mjpeg_info("Frame size:  %u x %u", param->width, param->height);

  return 0;
}
Esempio n. 12
0
/*
 * lavplay message callback: forward a message to the mjpeg log channel
 * that matches its severity.  Unrecognised types are silently ignored,
 * as before.
 */
static void input(int type, char *message)
{
    if (type == LAVPLAY_MSG_ERROR)
        mjpeg_error("%s", message);
    else if (type == LAVPLAY_MSG_WARNING)
        mjpeg_warn("%s", message);
    else if (type == LAVPLAY_MSG_INFO)
        mjpeg_info("%s", message);
    else if (type == LAVPLAY_MSG_DEBUG)
        mjpeg_debug("%s", message);
}
Esempio n. 13
0
/*
 * Entry point of the y4m median/average denoiser tool: parse options,
 * read a YUV4MPEG stream from stdin, run filter() over each frame (after
 * an optional number of skipped frames), and write the result to stdout.
 * Prints replacement statistics at the end.
 */
int
main(int argc, char *argv[])
{
	int	i;
	long long avg, total;
	int	input_fd = 0;
	int	output_fd = 1;
	int	horz;
	int	vert;
	int	c;
	int	frame_count;

	y4m_stream_info_t istream, ostream;
	y4m_frame_info_t iframe;

	y4m_accept_extensions(1);

	/* option parsing; see Usage() for the meaning of each flag */
	while((c = getopt(argc, argv, "r:R:t:T:v:S:hI:w:fc:")) != EOF) {
		switch(c) {
		case 'r':
			radius_luma = atoi(optarg);
			break;
		case 'R':
			radius_chroma = atoi(optarg);
			break;
		case 't':
			threshold_luma = atoi(optarg);
			break;
		case 'T':
			threshold_chroma = atoi(optarg);
			break;
		case 'I':
			interlace = atoi (optarg);
			if (interlace != 0 && interlace != 1)
			{
				Usage (argv[0]);
				exit (1);
			}
			break;
		case 'S':
			param_skip = atoi (optarg);
			break;
		case 'f':
			param_fast = 1;
			break;
		case 'w':
			/* a few magic weights select dedicated fast paths;
			   anything else is used as a free-form weight */
			if (strcmp (optarg, "8") == 0)
				param_weight_type = 1;
			else if (strcmp (optarg, "2.667") == 0)
				param_weight_type = 2;
			else if (strcmp (optarg, "13.333") == 0)
				param_weight_type = 3;
			else if (strcmp (optarg, "24") == 0)
				param_weight_type = 4;
			else
				param_weight_type = 0;
			param_weight = atof (optarg);
			break;
                case 'c':
                        cutoff = atof(optarg);
                        break;
		case 'v':
			verbose = atoi (optarg);
			if (verbose < 0 || verbose >2)
			{
				Usage (argv[0]);
				exit (1);
			}
			break;		  
			
		case 'h':
                        Usage (argv[0]);
		/* note: 'h' deliberately falls through to exit(0) */
		default:
			exit(0);
		}
	}

        /* default weight depends on whether the fast path is enabled */
        if( param_weight < 0 ) {
            if( param_fast )
                param_weight = 8.0;
            else
                param_weight = 1.0;
        }

        /* precompute fixed-point reciprocals used for fast averaging */
        for( i=1; i<NUMAVG; i++ ) {
            avg_replace[i]=0;
            divisor[i]=((1<<DIVISORBITS)+(i>>1))/i;
            divoffset[i]=divisor[i]*(i>>1)+(divisor[i]>>1);
        }

#ifdef HAVE_ASM_MMX
        if( cpu_accel() & ACCEL_X86_MMXEXT )
            domean8=1;
#endif

	mjpeg_info ("fast %d, weight type %d\n", param_fast,
		param_weight_type);

	if (radius_luma <= 0 || radius_chroma <= 0)
	   mjpeg_error_exit1("radius values must be > 0!");

	if (threshold_luma < 0 || threshold_chroma < 0)
	   mjpeg_error_exit1("threshold values must be >= 0!");

   (void)mjpeg_default_handler_verbosity(verbose);

	y4m_init_stream_info(&istream);
	y4m_init_stream_info(&ostream);
	y4m_init_frame_info(&iframe);

	i = y4m_read_stream_header(input_fd, &istream);
	if (i != Y4M_OK)
	  mjpeg_error_exit1("Input stream error: %s", y4m_strerr(i));

	if (y4m_si_get_plane_count(&istream) != 3)
	   mjpeg_error_exit1("Only 3 plane formats supported");

	/* chroma subsampling ratios determine the chroma plane sizes */
	chroma_mode = y4m_si_get_chroma(&istream);
	SS_H = y4m_chroma_ss_x_ratio(chroma_mode).d;
	SS_V = y4m_chroma_ss_y_ratio(chroma_mode).d;

	mjpeg_debug("chroma subsampling: %dH %dV\n",SS_H,SS_V);

	/* auto-detect interlacing from the stream unless forced with -I */
	if (interlace == -1)
	{
	  i = y4m_si_get_interlace(&istream);
	  switch (i)
	  {
	  case Y4M_ILACE_NONE:
	       interlace = 0;
	       break;
	  case Y4M_ILACE_BOTTOM_FIRST:
	  case Y4M_ILACE_TOP_FIRST:
	       interlace = 1;
	       break;
	  default:
	       mjpeg_warn("Unknown interlacing '%d', assuming non-interlaced", i);
	       interlace = 0;
	       break;
	  }
	}

	if( interlace && y4m_si_get_height(&istream) % 2 != 0 )
		mjpeg_error_exit1("Input images have odd number of lines - can't treats as interlaced!" );

	horz = y4m_si_get_width(&istream);
	vert = y4m_si_get_height(&istream);
	mjpeg_debug("width=%d height=%d luma_r=%d chroma_r=%d luma_t=%d chroma_t=%d", horz, vert, radius_luma, radius_chroma, threshold_luma, threshold_chroma);

	y4m_copy_stream_info(&ostream, &istream);

	/* NOTE(review): malloc results are not checked here — a failed
	   allocation would crash in y4m_read_frame/filter. */
	input_frame[0] = malloc(horz * vert);
	input_frame[1] = malloc((horz / SS_H) * (vert / SS_V));
	input_frame[2] = malloc((horz / SS_H) * (vert / SS_V));

	output_frame[0] = malloc(horz * vert);
	output_frame[1] = malloc((horz / SS_H) * (vert / SS_V));
	output_frame[2] = malloc((horz / SS_H) * (vert / SS_V));


	y4m_write_stream_header(output_fd, &ostream);

	/* main loop: the first param_skip frames are passed through
	   unfiltered */
	frame_count = 0;
	while (y4m_read_frame(input_fd, &istream, &iframe, input_frame) == Y4M_OK)
	{ 
		frame_count++;
		if (frame_count > param_skip)
		{
		  filter(horz, vert,  input_frame, output_frame);
		  y4m_write_frame(output_fd, &ostream, &iframe, output_frame);
		}
		else
		  y4m_write_frame(output_fd, &ostream, &iframe, input_frame);
	}

	/* statistics: average number of pixels replaced per bucket */
	for (total=0, avg=0, i=0; i < NUMAVG; i++) {
		total += avg_replace[i];
                avg   += avg_replace[i] * i; 
        }
	mjpeg_info("frames=%d avg=%3.1f", frame_count, ((double)avg)/((double)total));

	for (i=0; i < NUMAVG; i++) {
		mjpeg_debug( "%02d: %6.2f", i,
			(((double)avg_replace[i]) * 100.0)/(double)(total));
	}

	y4m_fini_stream_info(&istream);
	y4m_fini_stream_info(&ostream);
	y4m_fini_frame_info(&iframe);
	exit(0);
}
Esempio n. 14
0
/// Initialise a VCD/SVCD stills video stream: scan the first sequence
/// header, then pick the stream id and decoder buffer size required by
/// the target format (high- vs normal-resolution stills), and set up the
/// underlying MuxStream.  Exits on unsupported mux formats.
void StillsStream::Init ( )
{
	int stream_id = -1;
	int buffer_size = -1;

	SetBufSize( 4*1024*1024 );
	ScanFirstSeqHeader();

	mjpeg_debug( "Stills: Video buffer suggestion ignored!" );
	switch( muxinto.mux_format )
	{
	case  MPEG_FORMAT_VCD_STILL :
		/* width > 352 distinguishes hi-res from normal VCD stills */
		if( horizontal_size > 352 )
		{
			stream_id = VIDEO_STR_0+2 ;
			/* buffer size comes from the stream's VBV header */
			buffer_size = vbv_buffer_size*2048;
			mjpeg_info( "Stills Stream %02x: high-resolution VCD stills %d KB each", 
						stream_id,
						buffer_size );
			if( buffer_size < 46*1024 )
				mjpeg_error_exit1( "I Can't multiplex high-res stills smaller than normal res stills - sorry!");

		}
		else
		{
			stream_id = VIDEO_STR_0+1 ;
			buffer_size = 46*1024;
			mjpeg_info( "Stills Stream %02x: normal VCD stills", stream_id );
		}
		break;
	case MPEG_FORMAT_SVCD_STILL :
		/* SVCD uses a fixed 230 KB buffer for both resolutions */
		if( horizontal_size > 480 )
		{
			stream_id = VIDEO_STR_0+1;
			buffer_size = 230*1024;
			mjpeg_info( "Stills Stream %02x: high-resolution SVCD stills.", 
						stream_id );
		}
		else
		{
			stream_id = VIDEO_STR_0+1 ;
			buffer_size = 230*1024;
			mjpeg_info( "Stills Stream %02x: normal-resolution SVCD stills.", stream_id );
		}
		break;
	default:
		mjpeg_error_exit1( "Only SVCD and VCD Still currently supported");
	}


	MuxStream::Init( stream_id,
					 1,  // Buffer scale
					 buffer_size,
					 0,  // Zero stuffing
					 muxinto.buffers_in_video,
					 muxinto.always_buffers_in_video);
	
	/* Skip to the end of the 1st AU (*2nd* Picture start!)
	*/
	AU_hdr = SEQUENCE_HEADER;
	AU_pict_data = 0;
	AU_start = 0;

    OutputSeqhdrInfo();

}
Esempio n. 15
0
/// Prefills the internal buffer for output multiplexing.
/// Scans ahead in the DTS elementary stream, parsing one frame header
/// per iteration and appending an access unit for it, until
/// frames_to_buffer more frames are buffered, EOF is reached, or the
/// mux maximum PTS is exceeded.
/// @param frames_to_buffer the number of audio frames to read ahead
void DTSStream::FillAUbuffer(unsigned int frames_to_buffer )
{
    unsigned int packet_samples;

	last_buffered_AU += frames_to_buffer;
	mjpeg_debug( "Scanning %d dts audio frames to frame %d", 
				 frames_to_buffer, last_buffered_AU );

	while( !bs.eos() && decoding_order < last_buffered_AU 
            && !muxinto.AfterMaxPTS(access_unit.PTS) )
	{
		/* advance to the next frame: the header was already consumed,
		   so skip the remainder of the current frame's payload */
		int skip = access_unit.length - header_skip; 
        bs.SeekFwdBits(skip);
		prev_offset = AU_start;
		AU_start = bs.bitcount();

        /* short seek => truncated final frame: drop it and stop */
        if( AU_start - prev_offset != access_unit.length*8 )
        {
            mjpeg_warn( "Discarding incomplete final frame dts stream %d!",
                       stream_num);
            aunits.DropLast();
            decoding_order--;
            break;
        }

		/* Check if we have reached the end or have  another catenated 
		   stream to process before finishing ... */
		if ( (syncword = bs.GetBits(32))!=DTS_SYNCWORD )
		{
			if( !bs.eos()   )
			{
				mjpeg_error_exit1( "Can't find next dts frame: @ %lld we have %04x - broken bit-stream?", AU_start/8, syncword );
            }
            break;
		}

        /* parse the fixed DTS frame header fields we need */
        bs.GetBits(6);         // additional sync
        bs.GetBits(1);         // CRC
        packet_samples = (bs.GetBits(7) + 1) * 32;         // pcm samples
        framesize = bs.GetBits(14) + 1;        // frame size

        bs.GetBits(6);              // audio channels
        bs.GetBits(4);              // sample rate code
        bs.GetBits(5);              // bitrate
        bs.GetBits(5);              // misc.

        access_unit.start = AU_start;
		access_unit.length = framesize;
		/* PTS = frame index * samples-per-frame, scaled to clock ticks */
		access_unit.PTS = static_cast<clockticks>(decoding_order) * 
			static_cast<clockticks>(packet_samples) * 
			static_cast<clockticks>(CLOCKS)	/ samples_per_second;
		access_unit.DTS = access_unit.PTS;
		access_unit.dorder = decoding_order;
		decoding_order++;
		aunits.Append( access_unit );
		num_frames++;

		num_syncword++;

		/* progress logging only every 10 frames */
		if (num_syncword >= old_frames+10 )
		{
			mjpeg_debug ("Got %d frame headers.", num_syncword);
			old_frames=num_syncword;
		}

    }
	last_buffered_AU = decoding_order;
	eoscan = bs.eos() || muxinto.AfterMaxPTS(access_unit.PTS);
}
Esempio n. 16
0
/*
 * Unsharp-mask one frame: blur luma (and optionally chroma) rows then
 * columns with the precomputed convolution tables, then merge the blurred
 * copy back into the original with per-plane amount/threshold clamping.
 * Interlaced frames are column-processed one field at a time.
 * Operates entirely on the globals set up by the caller (i_yuv/o_yuv,
 * cmatrix_*, ctable_*, geometry and threshold/amount parameters).
 */
void y4munsharp(void)
	{
	int	i, row, col, diff, value;
	u_char	*i_ptr, *o_ptr;

	mjpeg_debug("Blurring Luma rows frame %d", frameno);

	for	(row = 0; row < yheight; row++)
		{
		blur_line(ctable_y, cmatrix_y, cmatrix_y_len, 
			&i_yuv[0][row * ywidth],
			&o_yuv[0][row * ywidth],
			ywidth);
		}

	/* uv_radius == -1.0 means "don't touch chroma": just copy it */
	if	(uv_radius != -1.0)
		{
		mjpeg_debug("Blurring Chroma rows frame %d", frameno);
		for	(row = 0; row < uvheight; row++)
			{
			blur_line(ctable_uv, cmatrix_uv, cmatrix_uv_len,
				&i_yuv[1][row * uvwidth],
				&o_yuv[1][row * uvwidth],
				uvwidth);
			blur_line(ctable_uv, cmatrix_uv, cmatrix_uv_len,
				&i_yuv[2][row * uvwidth],
				&o_yuv[2][row * uvwidth],
				uvwidth);
			}
		}
	else
		{
		memcpy(o_yuv[1], i_yuv[1], uvlen);
		memcpy(o_yuv[2], i_yuv[2], uvlen);
		}

	mjpeg_debug("Blurring Luma columns frame %d", frameno);
	for	(col = 0; col < ywidth; col++)
		{
/*
 * Do the entire frame if progressive, otherwise this does the only
 * the first field.
*/
		get_column(&o_yuv[0][col], cur_col,
			interlaced ? 2 * ywidth : ywidth,
			interlaced ? yheight / 2 : yheight);
		blur_line(ctable_y, cmatrix_y, cmatrix_y_len,
			cur_col,
			dest_col,
			interlaced ? yheight / 2 : yheight);
		put_column(dest_col, &o_yuv[0][col],
			interlaced ? 2 * ywidth : ywidth,
			interlaced ? yheight / 2 : yheight);

/*
 * If interlaced now process the second field (data source is offset 
 * by 'ywidth').
*/
		if	(interlaced)
			{
			get_column(&o_yuv[0][col + ywidth], cur_col,
				2 * ywidth,
				yheight / 2);
			blur_line(ctable_y, cmatrix_y, cmatrix_y_len,
				cur_col,
				dest_col,
				interlaced ? yheight / 2 : yheight);
			put_column(dest_col, &o_yuv[0][col + ywidth],
				2 * ywidth,
				yheight / 2);
			}
		}

	if	(uv_radius == -1)
		goto merging;

	mjpeg_debug("Blurring chroma columns frame %d", frameno);
	for	(col = 0; col < uvwidth; col++)
		{
/* U */
		get_column(&o_yuv[1][col], cur_col,
			interlaced ? 2 * uvwidth : uvwidth,
			interlaced ? uvheight / 2 : uvheight);
		blur_line(ctable_uv, cmatrix_uv, cmatrix_uv_len,
			cur_col,
			dest_col,
			interlaced ? uvheight / 2 : uvheight);
		put_column(dest_col, &o_yuv[1][col],
			interlaced ? 2 * uvwidth : uvwidth,
			interlaced ? uvheight / 2 : uvheight);
		if	(interlaced)
			{
			get_column(&o_yuv[1][col + uvwidth], cur_col,
				2 * uvwidth,
				uvheight / 2);
			blur_line(ctable_uv, cmatrix_uv, cmatrix_uv_len,
				cur_col,
				dest_col,
				interlaced ? uvheight / 2 : uvheight);
			put_column(dest_col, &o_yuv[1][col + uvwidth],
				2 * uvwidth,
				uvheight / 2);
			}
/* V */
		get_column(&o_yuv[2][col], cur_col,
			interlaced ? 2 * uvwidth : uvwidth,
			interlaced ? uvheight / 2 : uvheight);
		blur_line(ctable_uv, cmatrix_uv, cmatrix_uv_len,
			cur_col,
			dest_col,
			interlaced ? uvheight / 2 : uvheight);
		put_column(dest_col, &o_yuv[2][col],
			interlaced ? 2 * uvwidth : uvwidth,
			interlaced ? uvheight / 2 : uvheight);
		if	(interlaced)
			{
			get_column(&o_yuv[2][col + uvwidth], cur_col,
				2 * uvwidth,
				uvheight / 2);
			blur_line(ctable_uv, cmatrix_uv, cmatrix_uv_len,
				cur_col,
				dest_col,
				interlaced ? uvheight / 2 : uvheight);
			put_column(dest_col, &o_yuv[2][col + uvwidth],
				2 * uvwidth,
				uvheight / 2);
			}
		}
merging:
	mjpeg_debug("Merging luma frame %d", frameno);
	/* sharpen: out = in + amount * (in - blurred), skipping small
	   differences below the threshold, clamped to video legal range */
	for	(row = 0, i_ptr = i_yuv[0], o_ptr = o_yuv[0]; row < yheight; row++)
		{
		for	(i = 0; i < ywidth; i++, i_ptr++, o_ptr++)
			{
			diff = *i_ptr - *o_ptr;
			if	(abs(2 * diff) < y_threshold)
				diff = 0;
			value = *i_ptr + (y_amount * diff);
/*
 * For video the limits are 16 and 235 for the luma rather than 0 and 255!
*/
			if	(value < lowy)
				value = lowy;
			else if	(value > highy)
				value = highy;
			*o_ptr = value;
			}
		}

	if	(uv_radius == -1.0)
		goto done;

	mjpeg_debug("Merging chroma frame %d", frameno);
	for	(row = 0, i_ptr = i_yuv[1], o_ptr = o_yuv[1]; row < uvheight; row++)
		{
		for	(i = 0; i < uvwidth; i++, i_ptr++, o_ptr++)
			{
			diff = *i_ptr - *o_ptr;
			if	(abs(2 * diff) < uv_threshold)
				diff = 0;
			value = *i_ptr + (uv_amount * diff);
/*
 * For video the limits are 16 and 240 for the chroma rather than 0 and 255!
*/
			if	(value < lowuv)
				value = lowuv;
			else if	(value > highuv)
				value = highuv;
			*o_ptr = value;
			}
		}
	for	(row = 0, i_ptr = i_yuv[2], o_ptr = o_yuv[2]; row < uvheight; row++)
		{
		for	(i = 0; i < uvwidth; i++, i_ptr++, o_ptr++)
			{
			diff = *i_ptr - *o_ptr;
			if	(abs(2 * diff) < uv_threshold)
				diff = 0;
			value = *i_ptr + (uv_amount * diff);
/*
 * For video the limits are 16 and 240 for the chroma rather than 0 and 255!
*/
			/* NOTE(review): the V plane clamps to the literal 16
			   while the U plane above uses 'lowuv' — confirm this
			   difference is intentional. */
			if	(value < 16)
				value = 16;
			else if	(value > highuv)
				value = highuv;
			*o_ptr = value;
			}
		}
done:
	return;
	}
Esempio n. 17
0
/*
 * Read one frame's worth of PPM data, handling the interlace/interleave
 * combinations: interleaved interlaced input is split into two field
 * buffers in one pass; non-interleaved interlaced input is read as two
 * consecutive PPM images.  Plane buffers are allocated on first use.
 *
 * Returns 0 on success, 1 on EOF, -1 on error (exits on geometry
 * mismatch between frames).
 */
static
int read_ppm_frame(int fd, ppm_info_t *ppm,
		   uint8_t *buffers[], uint8_t *buffers2[],
		   int ilace, int ileave, int bgr)
{
  int width, height;
  /* one shared row of scratch space, kept across calls */
  static uint8_t *rowbuffer = NULL;
  int err;

  err = read_ppm_header(fd, &width, &height);
  if (err > 0) return 1;  /* EOF */
  if (err < 0) return -1; /* error */
  mjpeg_debug("Got PPM header:  %dx%d", width, height);

  if (ppm->width == 0) {
    /* first time: remember the geometry all later frames must match */
    mjpeg_debug("Initializing PPM read_frame");
    ppm->width = width;
    ppm->height = height;
    rowbuffer = malloc(width * 3 * sizeof(rowbuffer[0]));
  } else {
    /* make sure everything matches */
    if ( (ppm->width != width) ||
	 (ppm->height != height) )
      mjpeg_error_exit1("One of these frames is not like the others!");
  }
  if (buffers[0] == NULL) 
    alloc_buffers(buffers, width, height);
  /* the second field buffer is only needed for interlaced input */
  if ((buffers2[0] == NULL) && (ilace != Y4M_ILACE_NONE))
    alloc_buffers(buffers2, width, height);

  mjpeg_debug("Reading rows");

  if ((ilace != Y4M_ILACE_NONE) && (ileave)) {
    /* Interlaced and Interleaved:
       --> read image and deinterleave fields at same time */
    if (ilace == Y4M_ILACE_TOP_FIRST) {
      /* 1st buff arg == top field == temporally first == "buffers" */
      read_ppm_into_two_buffers(fd, buffers, buffers2,
				rowbuffer, width, height, bgr);
    } else {
      /* bottom-field-first */
      /* 1st buff art == top field == temporally second == "buffers2" */
      read_ppm_into_two_buffers(fd, buffers2, buffers,
				rowbuffer, width, height, bgr);
    }      
  } else if ((ilace == Y4M_ILACE_NONE) || (!ileave)) {
    /* Not Interlaced, or Not Interleaved:
       --> read image into first buffer... */
    read_ppm_into_one_buffer(fd, buffers, rowbuffer, width, height, bgr);
    if ((ilace != Y4M_ILACE_NONE) && (!ileave)) {
      /* ...Actually Interlaced:
	 --> read the second image/field into second buffer */
      err = read_ppm_header(fd, &width, &height);
      if (err > 0) return 1;  /* EOF */
      if (err < 0) return -1; /* error */
      mjpeg_debug("Got PPM header:  %dx%d", width, height);
      
      /* make sure everything matches */
      if ( (ppm->width != width) ||
	   (ppm->height != height) )
	mjpeg_error_exit1("One of these frames is not like the others!");
      read_ppm_into_one_buffer(fd, buffers2, rowbuffer, width, height, bgr);
    }
  }
  return 0;
}
Esempio n. 18
0
/*
 * Entry point of the SDL YUV4MPEG player: parse options, read a 4:2:0
 * y4m stream from stdin, and display each frame in an SDL YUV overlay,
 * optionally pacing playback to the stream (or user-supplied) frame rate.
 */
int main(int argc, char *argv[])
{
   int verbosity = 1;
   double time_between_frames = 0.0;
   double frame_rate = 0.0;
   struct timeval time_now;
   int n, frame;
   unsigned char *yuv[3];
   int in_fd = 0;
   int screenwidth=0, screenheight=0;
   y4m_stream_info_t streaminfo;
   y4m_frame_info_t frameinfo;
   int frame_width;
   int frame_height;
   int wait_for_sync = 1;
   char *window_title = NULL;

   /* option parsing: -c free-run, -s size, -t title, -f rate, -v verbosity */
   while ((n = getopt(argc, argv, "hs:t:f:cv:")) != EOF) {
      switch (n) {
         case 'c':
            wait_for_sync = 0;
            break;
         case 's':
            if (sscanf(optarg, "%dx%d", &screenwidth, &screenheight) != 2) {
               mjpeg_error_exit1( "-s option needs two arguments: -s 10x10");
               exit(1);
            }
            break;
	  case 't':
	    window_title = optarg;
	    break;
	  case 'f':
		  frame_rate = atof(optarg);
		  if( frame_rate <= 0.0 || frame_rate > 200.0 )
			  mjpeg_error_exit1( "-f option needs argument > 0.0 and < 200.0");
		  break;
          case 'v':
	    verbosity = atoi(optarg);
	    if ((verbosity < 0) || (verbosity > 2))
	      mjpeg_error_exit1("-v needs argument from {0, 1, 2} (not %d)",
				verbosity);
	    break;
	  case 'h':
	  case '?':
            usage();
            exit(1);
            break;
         default:
            usage();
            exit(1);
      }
   }

   mjpeg_default_handler_verbosity(verbosity);

   y4m_accept_extensions(1);
   y4m_init_stream_info(&streaminfo);
   y4m_init_frame_info(&frameinfo);
   if ((n = y4m_read_stream_header(in_fd, &streaminfo)) != Y4M_OK) {
      mjpeg_error("Couldn't read YUV4MPEG2 header: %s!",
         y4m_strerr(n));
      exit (1);
   }

   /* only 4:2:0 variants are supported by the overlay code below */
   switch (y4m_si_get_chroma(&streaminfo)) {
   case Y4M_CHROMA_420JPEG:
   case Y4M_CHROMA_420MPEG2:
   case Y4M_CHROMA_420PALDV:
     break;
   default:
     mjpeg_error_exit1("Cannot handle non-4:2:0 streams yet!");
   }

   frame_width = y4m_si_get_width(&streaminfo);
   frame_height = y4m_si_get_height(&streaminfo);

   if ((screenwidth <= 0) || (screenheight <= 0)) {
     /* no user supplied screen size, so let's use the stream info */
     y4m_ratio_t aspect = y4m_si_get_sampleaspect(&streaminfo);
       
     if (!(Y4M_RATIO_EQL(aspect, y4m_sar_UNKNOWN))) {
       /* if pixel aspect ratio present, use it */
#if 1
       /* scale width, but maintain height (line count) */
       screenheight = frame_height;
       screenwidth = frame_width * aspect.n / aspect.d;
#else
       if ((frame_width * aspect.d) < (frame_height * aspect.n)) {
	 screenwidth = frame_width;
	 screenheight = frame_width * aspect.d / aspect.n;
       } else {
	 screenheight = frame_height;
	 screenwidth = frame_height * aspect.n / aspect.d;
       }
#endif
     } else {
       /* unknown aspect ratio -- assume square pixels */
       screenwidth = frame_width;
       screenheight = frame_height;
     }
   }

   /* Initialize the SDL library */
   if( SDL_Init(SDL_INIT_VIDEO) < 0 ) {
      mjpeg_error("Couldn't initialize SDL: %s", SDL_GetError());
      exit(1);
   }

   /* set window title */
   SDL_WM_SetCaption(window_title, NULL);

   /* yuv params: one full-size luma plane, two quarter-size chroma planes.
      NOTE(review): malloc results are unchecked here. */
   yuv[0] = malloc(frame_width * frame_height * sizeof(unsigned char));
   yuv[1] = malloc(frame_width * frame_height / 4 * sizeof(unsigned char));
   yuv[2] = malloc(frame_width * frame_height / 4 * sizeof(unsigned char));

   screen = SDL_SetVideoMode(screenwidth, screenheight, 0, SDL_SWSURFACE);
   if ( screen == NULL ) {
      mjpeg_error("SDL: Couldn't set %dx%d: %s",
		  screenwidth, screenheight, SDL_GetError());
      exit(1);
   }
   else {
      mjpeg_debug("SDL: Set %dx%d @ %d bpp",
		  screenwidth, screenheight, screen->format->BitsPerPixel);
   }

   /* since IYUV ordering is not supported by Xv accel on maddog's system
    *  (Matrox G400 --- although, the alias I420 is, but this is not
    *  recognized by SDL), we use YV12 instead, which is identical,
    *  except for ordering of Cb and Cr planes...
    * we swap those when we copy the data to the display buffer...
    */
   yuv_overlay = SDL_CreateYUVOverlay(frame_width, frame_height,
				      SDL_YV12_OVERLAY,
				      screen);
   if ( yuv_overlay == NULL ) {
      mjpeg_error("SDL: Couldn't create SDL_yuv_overlay: %s",
		      SDL_GetError());
      exit(1);
   }
   if ( yuv_overlay->hw_overlay ) 
     mjpeg_debug("SDL: Using hardware overlay.");

   rect.x = 0;
   rect.y = 0;
   rect.w = screenwidth;
   rect.h = screenheight;

   SDL_DisplayYUVOverlay(yuv_overlay, &rect);

   signal (SIGINT, sigint_handler);

   frame = 0;
   if ( frame_rate == 0.0 ) 
   {
	   /* frame rate has not been set from command-line... */
	   if (Y4M_RATIO_EQL(y4m_fps_UNKNOWN, y4m_si_get_framerate(&streaminfo))) {
	     mjpeg_info("Frame-rate undefined in stream... assuming 25Hz!" );
	     frame_rate = 25.0;
	   } else {
	     frame_rate = Y4M_RATIO_DBL(y4m_si_get_framerate(&streaminfo));
	   }
   }
   time_between_frames = 1.e6 / frame_rate;

   gettimeofday(&time_now,0);

   /* main playback loop: read, copy into the overlay, display, pace */
   while ((n = y4m_read_frame(in_fd, &streaminfo, &frameinfo, yuv)) == Y4M_OK && (!got_sigint)) {

      /* Lock SDL_yuv_overlay */
      if ( SDL_MUSTLOCK(screen) ) {
         if ( SDL_LockSurface(screen) < 0 ) break;
      }
      if (SDL_LockYUVOverlay(yuv_overlay) < 0) break;

      /* let's draw the data (*yuv[3]) on a SDL screen (*screen) */
      /* planes 1 and 2 are swapped: y4m is Cb,Cr but YV12 is Cr,Cb */
      memcpy(yuv_overlay->pixels[0], yuv[0], frame_width * frame_height);
      memcpy(yuv_overlay->pixels[1], yuv[2], frame_width * frame_height / 4);
      memcpy(yuv_overlay->pixels[2], yuv[1], frame_width * frame_height / 4);

      /* Unlock SDL_yuv_overlay */
      if ( SDL_MUSTLOCK(screen) ) {
         SDL_UnlockSurface(screen);
      }
      SDL_UnlockYUVOverlay(yuv_overlay);

      /* Show, baby, show! */
      SDL_DisplayYUVOverlay(yuv_overlay, &rect);
      mjpeg_info("Playing frame %4.4d - %s",
		 frame, print_status(frame, frame_rate));

      /* busy-wait (in 1ms sleeps) until this frame's display slot */
      if (wait_for_sync)
         while(get_time_diff(time_now) < time_between_frames) {
            usleep(1000);
         }
      frame++;

      gettimeofday(&time_now,0);
   }

   if ((n != Y4M_OK) && (n != Y4M_ERR_EOF))
      mjpeg_error("Couldn't read frame: %s", y4m_strerr(n));

   for (n=0; n<3; n++) {
      free(yuv[n]);
   }

   mjpeg_info("Played %4.4d frames (%s)",
	      frame, print_status(frame, frame_rate));

   SDL_FreeYUVOverlay(yuv_overlay);
   SDL_Quit();

   y4m_fini_frame_info(&frameinfo);
   y4m_fini_stream_info(&streaminfo);
   return 0;
}
Esempio n. 19
0
/*
 * generate_YUV4MPEG - decode a numbered sequence of PNG files and emit
 * them on stdout as a YUV4MPEG2 stream.
 *
 * param - conversion parameters: image geometry, first frame index and
 *         frame count, frame rate, interlace and chroma-subsampling
 *         mode, and the printf-style pattern used to build each PNG
 *         filename (param->pngformatstr).
 *
 * Returns 0 on completion.  With param->numframes == -1 the loop runs
 * until a PNG file fails to open ("No more frames").  With a fixed
 * frame count, a failed decode re-emits the previous frame instead.
 */
static int generate_YUV4MPEG(parameters_t *param)
{
  uint32_t frame;
  char pngname[FILENAME_MAX];
  uint8_t *yuv[3];  /* buffer for Y/U/V planes of decoded PNG */
  y4m_stream_info_t streaminfo;
  y4m_frame_info_t frameinfo;

  /* YUV4MPEG2 requires an even image width: round odd widths up. */
  if ((param->width % 2) == 0)
    param->new_width = param->width;
  else
    {
      param->new_width = ((param->width >> 1) + 1) << 1;
      /* BUGFIX: this message used to go through printf() to stdout,
         which corrupted the YUV4MPEG stream also written to stdout.
         mjpeg_info() logs to stderr like the rest of this function. */
      mjpeg_info("Setting new, even image width %d", param->new_width);
    }

  mjpeg_info("Now generating YUV4MPEG stream.");
  y4m_init_stream_info(&streaminfo);
  y4m_init_frame_info(&frameinfo);

  y4m_si_set_width(&streaminfo, param->new_width);
  y4m_si_set_height(&streaminfo, param->height);
  y4m_si_set_interlace(&streaminfo, param->interlace);
  y4m_si_set_framerate(&streaminfo, param->framerate);
  y4m_si_set_chroma(&streaminfo, param->ss_mode);

  /* All three planes are allocated at full luma size: the PNG is
     decoded into them first and chroma is subsampled in place below. */
  yuv[0] = (uint8_t *)malloc(param->new_width * param->height * sizeof(yuv[0][0]));
  yuv[1] = (uint8_t *)malloc(param->new_width * param->height * sizeof(yuv[1][0]));
  yuv[2] = (uint8_t *)malloc(param->new_width * param->height * sizeof(yuv[2][0]));
  if (yuv[0] == NULL || yuv[1] == NULL || yuv[2] == NULL)
    mjpeg_error_exit1("Out of memory allocating YUV plane buffers.");

  y4m_write_stream_header(STDOUT_FILENO, &streaminfo);

  for (frame = param->begin;
       (frame < param->numframes + param->begin) || (param->numframes == -1);
       frame++) 
    {
      /* Build this frame's filename from the user-supplied pattern. */
      snprintf(pngname, sizeof(pngname), param->pngformatstr, frame);

      /* decode_png() delivers its output through these file-scope
         plane pointers. */
      raw0 = yuv[0];
      raw1 = yuv[1];
      raw2 = yuv[2];
      if (decode_png(pngname, 1, param) == -1)
	{
	  mjpeg_info("Read from '%s' failed:  %s", pngname, strerror(errno));
	  if (param->numframes == -1) 
	    {
	      mjpeg_info("No more frames.  Stopping.");
	      break;  /* we are done; leave 'for' loop */
	    } 
	  else 
	    {
	      /* Keep the stream's frame count intact by re-emitting the
		 previous (still buffered) frame. */
	      mjpeg_info("Rewriting latest frame instead.");
	    }
	} 
      else 
	{
	  /* NOTE: historical interlaced-PNG handling that was compiled
	     out with '#if 0' has been removed here. */
	  mjpeg_debug("Converting frame to YUV format.");
	  /* Transform colorspace, then subsample (in place) */
	  convert_RGB_to_YCbCr(yuv, param->height *  param->new_width);
	  chroma_subsample(param->ss_mode, yuv, param->new_width, param->height);

	  mjpeg_debug("Frame decoded, now writing to output stream.");
	}
      
      y4m_write_frame(STDOUT_FILENO, &streaminfo, &frameinfo, yuv);
    }

  y4m_fini_stream_info(&streaminfo);
  y4m_fini_frame_info(&frameinfo);
  free(yuv[0]);
  free(yuv[1]);
  free(yuv[2]);

  return 0;
}
Esempio n. 20
0
/*
 * readframe - read jpeg or dv frame into yuv buffer
 *
 * numframe - index of the frame to fetch from the edit list
 * frame    - destination planar Y/Cb/Cr buffers (frame[0..2])
 * param    - decoding parameters (geometry, chroma mode, mono flag, ...)
 * el       - edit list describing the source video
 *
 * returns:
 *	0   success
 *	1   fatal error
 *	2   corrupt data encountered; 
 *		decoding can continue, but this frame may be damaged 
 */
int readframe(int numframe, 
	      uint8_t *frame[],
	      LavParam *param,
	      EditList el)
{
  int len, i, res, data_format;
  uint8_t *frame_tmp;
  int warn;
  warn = 0;

  /* jpeg_data is a file-scope buffer (presumably MAX_JPEG_LEN bytes --
     TODO confirm at its definition); refuse streams whose frames would
     overflow it. */
  if (MAX_JPEG_LEN < el.max_frame_size) {
    mjpeg_error_exit1( "Max size of JPEG frame = %ld: too big",
		       el.max_frame_size);
  }
  
  /* Fetch the raw (still encoded) frame bytes and the container format
     that tells us which decode path to take. */
  len = el_get_video_frame(jpeg_data, numframe, &el);
  data_format = el_video_frame_data_format(numframe, &el);
  
  switch(data_format) {

  case DATAFORMAT_DV2 :
#ifndef HAVE_LIBDV
    mjpeg_error("DV input was not configured at compile time");
    res = 1;
#else
    mjpeg_debug("DV frame %d   len %d",numframe,len);
    res = 0;
    dv_parse_header(decoder, jpeg_data);
    switch(decoder->sampling) {
    case e_dv_sample_420:
      /* libdv decodes PAL DV directly as planar YUV 420
       * (YV12 or 4CC 0x32315659) if configured with the flag
       * --with-pal-yuv=YV12 which is not (!) the default
       */
      if (libdv_pal_yv12 == 1) {
	pitches[0] = decoder->width;
	pitches[1] = decoder->width / 2;
	pitches[2] = decoder->width / 2;
	if (pitches[0] != param->output_width ||
	    pitches[1] != param->chroma_width) {
	  mjpeg_error("for DV 4:2:0 only full width output is supported");
	  res = 1;
	} else {
	  dv_decode_full_frame(decoder, jpeg_data, e_dv_color_yuv,
			       frame, (int *)pitches);
	  /* swap the U and V components */
	  frame_tmp = frame[2];
	  frame[2] = frame[1];
	  frame[1] = frame_tmp;
	}
	break;
      }
      /* Deliberate fall through: without a YV12-configured libdv, PAL
	 4:2:0 DV is decoded via the packed-4:2:2 path below. */
    case e_dv_sample_411:
    case e_dv_sample_422:
      /* libdv decodes NTSC DV (native 411) and by default also PAL
       * DV (native 420) as packed YUV 422 (YUY2 or 4CC 0x32595559)
       * where the U and V information is repeated.  This can be
       * transformed to planar 420 (YV12 or 4CC 0x32315659).
       * For NTSC DV this transformation is lossy.
       */
      pitches[0] = decoder->width * 2;
      pitches[1] = 0;
      pitches[2] = 0;
      if (decoder->width != param->output_width) {
	mjpeg_error("for DV only full width output is supported");
	res = 1;
      } else {
	/* Decode into the packed scratch buffer, then split into the
	   caller's planar buffers. */
	dv_decode_full_frame(decoder, jpeg_data, e_dv_color_yuv,
			     dv_frame, (int *)pitches);
	frame_YUV422_to_planar(frame, dv_frame[0],
			       decoder->width,	decoder->height,
			       param->chroma);
      }
      break;
    default:
      /* Unknown DV sampling mode: treat as a failed decode. */
      res = 1;
      break;
    }
#endif /* HAVE_LIBDV */
    break;

  case DATAFORMAT_YUV420 :
  case DATAFORMAT_YUV422 :
    /* Already raw planar data: copy Y, then the two chroma planes. */
    mjpeg_debug("raw YUV frame %d   len %d",numframe,len);
    frame_tmp = jpeg_data;
    memcpy(frame[0], frame_tmp, param->luma_size);
    frame_tmp += param->luma_size;
    memcpy(frame[1], frame_tmp, param->chroma_size);
    frame_tmp += param->chroma_size;
    memcpy(frame[2], frame_tmp, param->chroma_size);
    res = 0;
    break;

  default:
    /* Anything else is assumed to be MJPEG. */
    mjpeg_debug("MJPEG frame %d   len %d",numframe,len);
    res = decode_jpeg_raw(jpeg_data, len, el.video_inter,
			  param->chroma,
			  param->output_width, param->output_height,
			  frame[0], frame[1], frame[2]);
  }
  
  /* res < 0: unrecoverable error; res == 1: this frame is damaged but
     the stream remains usable (reported to the caller as return 2). */
  if (res < 0) {
    mjpeg_warn( "Fatal Error Decoding Frame %d", numframe);
    return 1;
  } else if (res == 1) {
    mjpeg_warn( "Decoding of Frame %d failed", numframe);
    warn = 1;
    res = 0;
  }
  
  
  /* Monochrome output requested: force both chroma planes to the
     neutral value 0x80. */
  if (param->mono) {
    for (i = 0;
	 i < param->chroma_size;
	 ++i) {
      frame[1][i] = 0x80;
      frame[2][i] = 0x80;
    }
  }

  if(warn)
	  return 2;
  else
	  return 0;
}
Esempio n. 21
0
/*
 * main - convert a stream of PPM images on input into a YUV4MPEG2
 * stream on stdout, honoring the offset/framecount/interlace options
 * parsed from the command line.
 */
int main(int argc, char **argv)
{
  cl_info_t cl;
  y4m_stream_info_t sinfo;
  y4m_frame_info_t finfo;
  uint8_t *planes_a[Y4M_MAX_NUM_PLANES];  /* R'G'B' or Y'CbCr */
  uint8_t *planes_b[Y4M_MAX_NUM_PLANES];  /* R'G'B' or Y'CbCr */
  ppm_info_t ppm;
  int field_height;

  int fdout = 1;          /* output goes to stdout */
  int rc, idx, count, repeating_last;

  y4m_accept_extensions(1);
  y4m_init_stream_info(&sinfo);
  y4m_init_frame_info(&finfo);

  parse_args(&cl, argc, argv);

  ppm.width = 0;
  ppm.height = 0;
  for (idx = 0; idx < 3; idx++) {
    planes_a[idx] = NULL;
    planes_b[idx] = NULL;
  }

  /* The very first PPM frame/field-pair is read before the loop so its
     dimensions can seed the output stream header. */
  if (read_ppm_frame(cl.fdin, &ppm, planes_a, planes_b, 
		     cl.interlace, cl.interleave, cl.bgr))
    mjpeg_error_exit1("Failed to read first frame.");

  /* Setup streaminfo and write output header */
  setup_output_stream(fdout, &cl, &sinfo, &ppm, &field_height);

  /* Convert 'framecount' frames; framecount == 0 means "until EOF". */
  for (count = 0, repeating_last = 0;
       (count < (cl.offset + cl.framecount)) || (cl.framecount == 0);
       count++) {

    /* Once the input is exhausted in repeat-last mode we stop reading
       and transforming, and just keep writing the buffered frame. */
    if (!repeating_last) {

      /* Read the next PPM frame/field -- except on the first pass,
	 whose frame was already read above. */
      if (count > 0) {
	rc = read_ppm_frame(cl.fdin, &ppm, planes_a, planes_b, 
			    cl.interlace, cl.interleave, cl.bgr);
	if (rc == 1) {
	  /* clean input EOF */
	  if (cl.repeatlast) {
	    repeating_last = 1;
	  } else if (cl.framecount != 0) {
	    mjpeg_error_exit1("Input frame shortfall (only %d converted).",
			      count - cl.offset);
	  } else {
	    break;  /* input exhausted and no fixed count: we're done */
	  }
	} else if (rc) {
	  mjpeg_error_exit1("Error reading ppm frame");
	}
      }

      /* Skip the colorspace work for frames below the start offset --
	 unless repeat-last is on, in which case every frame must be
	 processed because any of them could turn out to be the last.
	 (When repeating_last was just set, the buffers already hold a
	 fully converted frame, so do NOT convert them again.) */
      if (!repeating_last &&
	  ((count >= cl.offset) || (cl.repeatlast))) {
	/* Transform colorspace, then subsample (in place) */
	convert_RGB_to_YCbCr(planes_a, ppm.width * field_height);
	chroma_subsample(cl.ss_mode, planes_a, ppm.width, field_height);
	if (cl.interlace != Y4M_ILACE_NONE) {
	  convert_RGB_to_YCbCr(planes_b, ppm.width * field_height);
	  chroma_subsample(cl.ss_mode, planes_b, ppm.width, field_height);
	}
      }
    }

    /* Emit the converted frame (or field pair, in field order). */
    if (count >= cl.offset) {
      switch (cl.interlace) {
      case Y4M_ILACE_NONE:
	if ((rc = y4m_write_frame(fdout, &sinfo, &finfo, planes_a)) != Y4M_OK)
	  mjpeg_error_exit1("Write frame failed: %s", y4m_strerr(rc));
	break;
      case Y4M_ILACE_TOP_FIRST:
	if ((rc = y4m_write_fields(fdout, &sinfo, &finfo, planes_a, planes_b))
	    != Y4M_OK)
	  mjpeg_error_exit1("Write fields failed: %s", y4m_strerr(rc));
	break;
      case Y4M_ILACE_BOTTOM_FIRST:
	if ((rc = y4m_write_fields(fdout, &sinfo, &finfo, planes_b, planes_a))
	    != Y4M_OK)
	  mjpeg_error_exit1("Write fields failed: %s", y4m_strerr(rc));
	break;
      default:
	mjpeg_error_exit1("Unknown ilace type!   %d", cl.interlace);
	break;
      }
    }
  } 


  for (idx = 0; idx < 3; idx++) {
    free(planes_a[idx]);
    free(planes_b[idx]);
  }
  y4m_fini_stream_info(&sinfo);
  y4m_fini_frame_info(&finfo);

  mjpeg_debug("Done.");
  return 0;
}
Esempio n. 22
0
/*
 * FillAUbuffer - scan forward through the MPEG audio elementary
 * stream, parsing frame headers and appending one access unit per
 * audio frame to 'aunits', until 'frames_to_buffer' more frames have
 * been buffered, the stream ends, or the mux's maximum PTS is reached.
 */
void MPAStream::FillAUbuffer(unsigned int frames_to_buffer )
{
	unsigned int padding_bit;
	last_buffered_AU += frames_to_buffer;

    if( eoscan )
        return;

    mjpeg_debug( "Scanning %d MPA frames to frame %d", 
                frames_to_buffer,
                last_buffered_AU );
	while( !bs.eos() 
           && decoding_order < last_buffered_AU 
           && !muxinto.AfterMaxPTS(access_unit.PTS) )
	{

		/* Seek to the end of the current frame; its first 4 bytes
		   (syncword + header) have already been consumed. */
		int skip=access_unit.length-4;
        bs.SeekFwdBits( skip );
		prev_offset = AU_start;
		AU_start = bs.bitcount();
		/* Fewer bits advanced than the header promised means the last
		   frame was truncated: drop it and stop scanning. */
        if( AU_start - prev_offset != access_unit.length*8 )
        {
            mjpeg_warn("Discarding incomplete final frame MPEG audio stream %02x!",
                       stream_id
                       );
            aunits.DropLast();
            --decoding_order;
            break;
        }
		/* Check whether we have reached the end or have another catenated 
		   stream to process before finishing ... */
		if ( (syncword = bs.GetBits( 11))!=AUDIO_SYNCWORD )
		{
            //
            // Handle a broken last frame...
			if( !bs.eos()   )
			{
                mjpeg_warn( "Data follows end of last recogniseable MPEG audio frame - bad stream?");
                eoscan = true;
                return;
			}
            break;
		}
		// Skip version_id:2, layer:2, protection:1
		(void) bs.GetBits( 5);
		int rate_code	= bs.GetBits( 4);
		// Skip frequency
		(void) bs.GetBits( 2);

		padding_bit=bs.Get1Bit();
		/* Record the new access unit: start offset, length implied by
		   the bit-rate code and padding bit, and PTS/DTS derived from
		   the fixed samples-per-frame of this audio layer. */
		access_unit.start = AU_start;
		access_unit.length = SizeFrame( rate_code, padding_bit );
		access_unit.PTS = static_cast<clockticks>(decoding_order) * static_cast<clockticks>(mpa_samples[layer]) * static_cast<clockticks>(CLOCKS)
			/ samples_per_second;
		access_unit.DTS = access_unit.PTS;
		access_unit.dorder = decoding_order;
		decoding_order++;
		aunits.Append( access_unit );
		num_frames[padding_bit]++;

		/* Skip the remaining 9 header bits (mode/copyright/emphasis
		   etc. -- not needed for muxing). */
		bs.GetBits( 9);
		
		num_syncword++;

		/* Progress logging, once per 10 frame headers. */
		if (num_syncword >= old_frames+10 )
		{
			mjpeg_debug ("Got %d frame headers.", num_syncword);
			old_frames=num_syncword;
		
		}
	


    }
	last_buffered_AU = decoding_order;
	eoscan = bs.eos() || muxinto.AfterMaxPTS(access_unit.PTS);
}