コード例 #1
0
ファイル: ppmtoy4m.c プロジェクト: AquaSoftGmbH/mjpeg
/*
 * Parse the header of a raw ("P6") PPM image from fd.
 * On success *width and *height hold the image dimensions and 0 is
 * returned; returns 1 when the stream is already at EOF.  Exits the
 * program on a malformed header or a maxval other than 255.
 * NOTE(review): this assumes y4m_read() returns 0 on a complete read,
 * >0 on a short read (EOF) and <0 on error -- confirm against the
 * y4m library documentation.
 */
static
int read_ppm_header(int fd, int *width, int *height)
{
  char s[6];
  int incomment;   /* comment-skipping state, used by the DO_* macros */
  int n;
  int maxval = 0;
  
  *width = 0;
  *height = 0;

  /* look for "P6" */
  n = y4m_read(fd, s, 3);
  if (n > 0) 
    return 1;  /* EOF */

  if ((n < 0) || (strncmp(s, "P6", 2)))
    mjpeg_error_exit1("Bad Raw PPM magic!");

  incomment = 0;
  /* The DO_* macros appear to consume whitespace/comments and parse
     ASCII integers, relying on fd, n and incomment being in scope. */
  DO_SKIP_WHITESPACE();
  DO_READ_NUMBER(*width);
  DO_SKIP_WHITESPACE();
  DO_READ_NUMBER(*height);
  DO_SKIP_WHITESPACE();
  DO_READ_NUMBER(maxval);

  /* Only 8-bit-per-sample PPM input is supported. */
  if (maxval != 255)
    mjpeg_error_exit1("Expecting maxval == 255, not %d!", maxval);

  return 0;
}
コード例 #2
0
/** Set the fill color from a "luma,Cb,Cr" specification string.
 *  @param area      textual spec, e.g. "16,128,128" ("%i" also accepts
 *                   hex 0x.. and octal 0.. forms)
 *  @param coloryuv  receives the parsed color components
 *  Exits the program on a malformed spec or an out-of-range component
 *  (legal ranges per the checks below: Y' 16-235, Cb/Cr 16-240).
 */
void set_yuvcolor(char area[20], struct color_yuv *coloryuv)
{
  int i;
  /* BUGFIX: "%i" requires int* arguments; the previous unsigned int
   * locals made the sscanf call undefined behavior. */
  int u1, u2, u3;

  i = sscanf (area, "%i,%i,%i", &u1, &u2, &u3);

  if ( 3 == i )
    {
      coloryuv->luma     = u1;
      coloryuv->chroma_b = u2;
      coloryuv->chroma_r = u3;
      if ( (coloryuv->luma > 235) || (coloryuv->luma < 16) )
        mjpeg_error_exit1("out of range value for luma given: %i, \n"
                          " allowed values 16-235", coloryuv->luma);
      if ( (coloryuv->chroma_b > 240) || (coloryuv->chroma_b < 16) )
        mjpeg_error_exit1("out of range value for Cb given: %i, \n"
                          " allowed values 16-240", coloryuv->chroma_b);
      if ( (coloryuv->chroma_r > 240) || (coloryuv->chroma_r < 16) )
        mjpeg_error_exit1("out of range value for Cr given: %i, \n"
                          " allowed values 16-240", coloryuv->chroma_r);

      mjpeg_info("got luma %i, Cb %i, Cr %i ",
                 coloryuv->luma, coloryuv->chroma_b, coloryuv->chroma_r );
    }
  else 
    mjpeg_error_exit1("Wrong number of colors given, %s", area);
}
コード例 #3
0
/** Parse an area specification of the form "WxH+X+Y" into *inarea.
 *  @param area    textual spec, e.g. "352x240+8+8"
 *  @param inarea  receives width/height/hoffset/voffset
 *  All four numbers must be even (presumably because of chroma
 *  subsampling -- TODO confirm); exits the program otherwise, or when
 *  the string does not contain exactly four numbers.
 */
void fillarea(char area[20], struct area_s *inarea)
{
  int i;
  /* BUGFIX: "%i" requires int* arguments; the previous unsigned int
   * locals made the sscanf call undefined behavior. */
  int u1, u2, u3, u4;

  /* Cutting out the numbers of the stream */
  i = sscanf (area, "%ix%i+%i+%i", &u1, &u2, &u3, &u4);

  if ( 4 == i)  /* Checking if we have got 4 numbers */
    {
      inarea->width = u1;
      inarea->height = u2;
      inarea->hoffset = u3;
      inarea->voffset = u4;

      if ( ((inarea->width % 2) != 0) || ((inarea->height % 2) != 0) ||
           ((inarea->hoffset % 2) != 0) || ((inarea->voffset % 2) != 0) )
        mjpeg_error_exit1("At least one argument no even number");

      if (verbose >= 1)
        mjpeg_info("got the area : W %i, H %i, Xoff %i, Yoff %i",
                   inarea->width, inarea->height, inarea->hoffset, inarea->voffset);
    }
  else
    /* BUGFIX: error message said "sting" instead of "string". */
    mjpeg_error_exit1("Wrong inactive string given: %s", area);

}
コード例 #4
0
ファイル: ppmtoy4m.c プロジェクト: AquaSoftGmbH/mjpeg
/*
 * Read one interlaced PPM image and de-interleave its rows into two
 * field buffers: the first row of each pair goes to buffers[] and the
 * second to buffers2[], each split into separate R/G/B planes.
 * With bgr != 0 the pixel bytes in the file are ordered B,G,R.
 * Exits the program on any short read.
 */
static
void read_ppm_into_two_buffers(int fd,
			       uint8_t *buffers[], 
			       uint8_t *buffers2[], 
			       uint8_t *rowbuffer,
			       int width, int height, int bgr)
{
  int x, y, rows;
  size_t out;                   /* plane offset of the current row */
  const int ri = bgr ? 2 : 0;   /* byte index of red within a pixel  */
  const int bi = bgr ? 0 : 2;   /* byte index of blue within a pixel */

  mjpeg_debug("read into two buffers, %dx%d", width, height);
  rows = height / 2;
  out = 0;
  for (y = 0; y < rows; y++) {
    /* first field row of the pair */
    if (y4m_read(fd, rowbuffer, width * 3))
      mjpeg_error_exit1("read error A  y=%d", y);
    for (x = 0; x < width; x++) {
      buffers[0][out + x] = rowbuffer[x * 3 + ri];
      buffers[1][out + x] = rowbuffer[x * 3 + 1];
      buffers[2][out + x] = rowbuffer[x * 3 + bi];
    }
    /* second field row of the pair */
    if (y4m_read(fd, rowbuffer, width * 3))
      mjpeg_error_exit1("read error B  y=%d", y);
    for (x = 0; x < width; x++) {
      buffers2[0][out + x] = rowbuffer[x * 3 + ri];
      buffers2[1][out + x] = rowbuffer[x * 3 + 1];
      buffers2[2][out + x] = rowbuffer[x * 3 + bi];
    }
    out += width;
  }
}
コード例 #5
0
/*
 * Decide whether this stills stream can be muxed at the current SCR.
 * Aborts if the still is larger than its buffer model permits, and
 * returns false while the buffer cannot yet accept the whole AU.
 * On the last sector of the last AU, coordinate with the sibling
 * stream so that both stills streams end as simultaneously as the
 * standard recommends.
 */
bool VCDStillsStream::MuxPossible(clockticks currentSCR)
{
    if( bufmodel.Size() < au_unsent )
    {
        mjpeg_error_exit1( "Illegal VCD still: larger than maximum permitted by its buffering parameters!");
    }
    if (RunOutComplete() || bufmodel.Space() < au_unsent)
    {
        return false;
    }

    if( LastSectorLastAU() )
    {
        if( sibling != 0 )
        {
            if( !stream_mismatch_warned && sibling->NextAUType() != NOFRAME  )
            {
                /* BUGFIX: the flag was tested but never set, so the
                 * warning was repeated on every call instead of being
                 * issued once. */
                stream_mismatch_warned = true;
                mjpeg_warn( "One VCD stills stream runs significantly longer than the other!");
                mjpeg_warn( "Simultaneous stream ending recommended by standard not possible" );
                return true;
            }
            return sibling->MuxCompleted() || sibling->LastSectorLastAU();
        }
        else
            return true;
    }
    else
        return true;
}
コード例 #6
0
ファイル: aunitbuffer.hpp プロジェクト: AquaSoftGmbH/mjpeg
	/// Discard the most recently appended access unit.
	/// Exits with an internal error if the buffer is empty.
	/// NOTE(review): Append() stores heap-allocated AUnit pointers and
	/// pop_back() does not delete the popped element -- verify that the
	/// caller has already taken ownership, otherwise this leaks.
	inline void DropLast()
		{
			if( buf.empty() )
				mjpeg_error_exit1( "INTERNAL ERROR: droplast empty AU buffer" );
			buf.pop_back();
			
		}
コード例 #7
0
/*
 * Open cur_filename for writing, retrying interactively when the disk
 * is full (ENOSPC) or the quota is exceeded (EDQUOT).
 * In that case the error message is handed to another thread
 * (presumably the UI) via kind_of_slaveThread_problem and this thread
 * blocks on the condition variable until an answer arrives; a non-zero
 * answer loops and retries the fopen().  Any other errno is treated as
 * a fatal internal error.
 * @return 0 on success; strm and strm_fd are then valid.
 */
int FileOutputStream::Open()
{
  char msg[512];
   while( !(strm = fopen( cur_filename, "wb" )) ){
      if( errno == ENOSPC
#ifndef __MINGW32__
                          || errno == EDQUOT
#endif
                                             ){
         ADM_assert(snprintf(msg,512,"can't open \"%s\": %s\n%s\n",
                             cur_filename,
                             (errno==ENOSPC?"filesystem full":"quota exceeded"),
                             "Please free up some space and press RETRY to try again.")!=-1);
         mutex_slaveThread_problem.lock();
           kind_of_slaveThread_problem = ADM_strdup(msg);
           cond_slaveThread_problem->wait(); /* implicit mutex_slaveThread_problem.unlock(); */
           ADM_dealloc(kind_of_slaveThread_problem);
           kind_of_slaveThread_problem = NULL;
         /* rc is set by the answering thread: 0 means give up.
            NOTE(review): the "ignore" comment below contradicts the
            exit -- confirm the intended meaning of rc == 0. */
         if( kind_of_slaveThread_problem_rc == 0 ){ /* ignore */
            /* it doesn't make any sense to continue */
            mjpeg_error_exit1( "Could not open for writing: %s", cur_filename );
         }
      }else{
         /* unexpected errno: report and abort */
         fprintf(stderr,"can't open \"%s\": %u (%s)\n", cur_filename, errno, strerror(errno));
         ADM_assert(0);
      }
   }
   strm_fd = fileno(strm);
   return 0;
}
コード例 #8
0
/** init_parse_files
 * Decode the first PNG frame to discover the stream geometry, then
 * validate the interlace/interleave options and report the resulting
 * YUV4MPEG parameters.
 * @param param  command-line parameters; width/height may be updated
 * @returns 0 on success (fatal problems exit the program directly)
 */
static int init_parse_files(parameters_t *param)
{ 
  char firstfile[255];

  snprintf(firstfile, sizeof(firstfile),
           param->pngformatstr, param->begin);
  mjpeg_debug("Analyzing %s to get the right pic params", firstfile);

  if (decode_png(firstfile, 0, param) == -1)
    mjpeg_error_exit1("Reading of %s failed.\n", firstfile);

  mjpeg_info("Image dimensions are %ux%u",
             param->width, param->height);

  mjpeg_info("Movie frame rate is:  %f frames/second",
             Y4M_RATIO_DBL(param->framerate));

  /* Report the interlacing mode; anything unspecified is fatal. */
  if (param->interlace == Y4M_ILACE_NONE)
    mjpeg_info("Non-interlaced/progressive frames.");
  else if (param->interlace == Y4M_ILACE_BOTTOM_FIRST)
    mjpeg_info("Interlaced frames, bottom field first.");
  else if (param->interlace == Y4M_ILACE_TOP_FIRST)
    mjpeg_info("Interlaced frames, top field first.");
  else
    mjpeg_error_exit1("Interlace has not been specified (use -I option)");

  if ((param->interlace != Y4M_ILACE_NONE) && (param->interleave == -1))
    mjpeg_error_exit1("Interleave has not been specified (use -L option)");

  /* Field-sequential input: each file is one field, so the output
     frame is twice as tall. */
  if ((param->interlace != Y4M_ILACE_NONE) && !(param->interleave))
    {
      param->height *= 2;
      mjpeg_info("Non-interleaved fields (image height doubled)");
    }
  mjpeg_info("Frame size:  %u x %u", param->width, param->height);

  return 0;
}
コード例 #9
0
/* Aligns values on 16 byte boundaries (to aid in vectorization).
 * NOTE: the raw malloc() pointer is not retained, so the returned
 * block can never be handed to free(); callers must treat it as
 * living for the life of the process.  Exits on allocation failure. */
static void *my_malloc(size_t size)
{
    unsigned long base;
    void *raw = malloc(size + 15);

    if (raw == NULL)
	    mjpeg_error_exit1("malloc(%ld) failed\n", (long)size);
    /* Round up to the next multiple of 16. */
    base = ((unsigned long)raw + 15) & ~15UL;
    return (void *)base;
}
コード例 #10
0
ファイル: ppmtoy4m.c プロジェクト: AquaSoftGmbH/mjpeg
/*
 * Fill in and emit the YUV4MPEG2 stream header on fdout, after
 * validating that the PPM geometry is compatible with the chosen
 * chroma subsampling and interlacing mode.  The height of a single
 * field (== frame height for progressive) is returned through
 * *field_height.  Exits on bad geometry or a failed header write.
 */
static
void setup_output_stream(int fdout, cl_info_t *cl,
			 y4m_stream_info_t *sinfo, ppm_info_t *ppm,
			 int *field_height) 
{
  int err;
  int xal = y4m_chroma_ss_x_ratio(cl->ss_mode).d;  /* width divisor  */
  int yal = y4m_chroma_ss_y_ratio(cl->ss_mode).d;  /* height divisor */

  /* Interlaced streams need twice the vertical alignment. */
  if (cl->interlace != Y4M_ILACE_NONE)
    yal *= 2;

  if ((ppm->width % xal) != 0)
    mjpeg_error_exit1("PPM width (%d) is not a multiple of %d!",
                      ppm->width, xal);
  if ((ppm->height % yal) != 0)
    mjpeg_error_exit1("PPM height (%d) is not a multiple of %d!",
                      ppm->height, yal);

  y4m_si_set_width(sinfo, ppm->width);
  if (cl->interlace == Y4M_ILACE_NONE) {
    /* Progressive: one PPM == one frame. */
    y4m_si_set_height(sinfo, ppm->height);
    *field_height = ppm->height;
  } else if (cl->interleave) {
    /* Interleaved input: one PPM carries both fields. */
    y4m_si_set_height(sinfo, ppm->height);
    *field_height = ppm->height / 2;
  } else {
    /* Field-sequential input: each PPM is a single field. */
    y4m_si_set_height(sinfo, ppm->height * 2);
    *field_height = ppm->height;
  }
  y4m_si_set_sampleaspect(sinfo, cl->aspect);
  y4m_si_set_interlace(sinfo, cl->interlace);
  y4m_si_set_framerate(sinfo, cl->framerate);
  y4m_si_set_chroma(sinfo, cl->ss_mode);

  err = y4m_write_stream_header(fdout, sinfo);
  if (err != Y4M_OK)
    mjpeg_error_exit1("Write header failed:  %s", y4m_strerr(err));

  mjpeg_info("Output Stream parameters:");
  y4m_log_stream_info(mjpeg_loglev_t("info"), "  ", sinfo);
}
コード例 #11
0
/*
 * Register the companion stills stream of a mixed VCD stills pair.
 * The two streams must have distinct stream ids; identical ids are a
 * fatal configuration error.
 */
void VCDStillsStream::SetSibling( VCDStillsStream *_sibling )
{
	assert( _sibling != 0 );
	sibling = _sibling;
	if( stream_id == sibling->stream_id )
		mjpeg_error_exit1("VCD mixed stills stream cannot contain two streams of the same type!");
}
コード例 #12
0
/* main
 * Entry point: parse the command line, validate the PNG input files
 * and drive the YUV4MPEG stream generation.
 * in: argc, argv:  classic command-line parameters
 * returns: 0 on success, non-zero otherwise
 */
int main(int argc, char ** argv)
{ 
  parameters_t param;

  sh_param = &param;
  y4m_accept_extensions(1);

  parse_commandline(argc, argv, &param);
  mjpeg_default_handler_verbosity(param.verbose);

  mjpeg_info("Parsing & checking input files.");
  if (init_parse_files(&param) != 0)
    mjpeg_error_exit1("* Error processing the PNG input.");

  if (generate_YUV4MPEG(&param) != 0)
    mjpeg_error_exit1("* Error processing the input files.");

  return 0;
}
コード例 #13
0
/*
 * Write len bytes to the output file, coping with short writes and
 * retrying interactively on a full filesystem (ENOSPC) or exceeded
 * quota (EDQUOT) via the same cross-thread problem/answer protocol as
 * Open().  Any other write failure is fatal.
 * @param buf  data to write
 * @param len  number of bytes to write
 */
void
FileOutputStream::Write( uint8_t *buf, unsigned int len )
{
  uint8_t *p = buf;
  unsigned int plen = len;
  int rc;
   ADM_assert(strm_fd != -1);
   while( (rc=write(strm_fd,p,plen)) != plen ){
      if( rc > 0 ){
         /* short write: advance past the bytes already written */
         p+=rc;
         plen-=rc;
         continue;
      }
      if( rc == -1 && (errno == ENOSPC
#ifndef __MINGW32__
                                       || errno == EDQUOT
#endif
                                                          ) ){
        char msg[512];
         fprintf(stderr,"slaveThread: we have a problem. errno=%u\n",errno);
         ADM_assert(snprintf(msg,512,"can't write to file \"%s\": %s\n%s\n",
                             cur_filename,
                             (errno==ENOSPC?"filesystem full":"quota exceeded"),
                             "Please free up some space and press RETRY to try again.")!=-1);
         mutex_slaveThread_problem.lock();
           kind_of_slaveThread_problem = ADM_strdup(msg);
           cond_slaveThread_problem->wait(); /* implicit mutex_slaveThread_problem.unlock(); */
           ADM_dealloc(kind_of_slaveThread_problem);
           kind_of_slaveThread_problem = NULL;
         /* rc == 0 from the answering thread means give up. */
         if( kind_of_slaveThread_problem_rc == 0 ){ /* ignore */
            /* it doesn't make any sense to continue */
            mjpeg_error_exit1( "Failed write: %s", cur_filename );
         }
      }else{
         mjpeg_error_exit1( "Failed write: %s", cur_filename );
      }
   }
}
コード例 #14
0
ファイル: ysSource.C プロジェクト: AquaSoftGmbH/mjpeg
/*
 * Enable "fake progressive" mode: pretend an interlaced source is
 * progressive by keeping only one field per frame.
 * @param f  selects the field (FAKE_TOP_ONLY keeps the top field,
 *           anything else non-zero the bottom); 0 leaves mode off.
 * Exits unless the source is plainly interlaced (not unknown, not
 * progressive, not mixed-mode).
 */
void ysSource::fake_progressive(FakeProg_t f)
{
  if (f) {
    if ( (_stream.interlace() == Y4M_UNKNOWN) ||
	 (_stream.interlace() == Y4M_ILACE_NONE) ) {
      mjpeg_error_exit1("Must be interlaced to fake non-interlaced!");
    }
    if (_stream.interlace() == Y4M_ILACE_MIXED) 
      mjpeg_error_exit1("Cannot fake non-interlaced from mixed-mode source!");
    if (f == FAKE_TOP_ONLY)
      mjpeg_info("Faking non-interlaced source; using top field only.");
    else
      mjpeg_info("Faking non-interlaced source; using bottom field only.");
    _fake_progressive = f;
    /* Keep the real stream description, then advertise a progressive
       stream of half the luma size. */
    _real_stream = _stream;
    _stream.interlace(Y4M_ILACE_NONE);
    _stream.y_size(_stream.y_size() / 2);
    _stream.sar(_stream.sar() / 2);  // NOTE(review): halving the SAR assumes
                                     // dropping a field doubles effective
                                     // pixel height -- confirm.
    /* All three plane pointers alias ONE field-sized buffer; only
       element [0] may ever be delete[]d. */
    if (_fake_field[0] != NULL) delete[] _fake_field[0];
    _fake_field[0] = new uint8_t[_real_stream.fielddim().area()];
    _fake_field[1] = _fake_field[2] = _fake_field[0];
  }
}
コード例 #15
0
ファイル: utilyuv.c プロジェクト: silicontrip/lavtools
/*
 * Map a one-character mode string to a Y4M interlacing constant:
 * "p" -> progressive, "t" -> top-field first, "b" -> bottom-field
 * first.  Any other input exits the program with a usage message.
 */
int parse_interlacing(char *str)
{
	/* Only single-character strings are acceptable. */
	if (str[0] != '\0' && str[1] == '\0')
	{
		if (str[0] == 'p')
			return Y4M_ILACE_NONE;
		if (str[0] == 't')
			return Y4M_ILACE_TOP_FIRST;
		if (str[0] == 'b')
			return Y4M_ILACE_BOTTOM_FIRST;
	}
	mjpeg_error_exit1("Valid interlacing modes are: p - progressive, t - top-field first, b - bottom-field first");
	return Y4M_UNKNOWN; /* to avoid compiler warnings */
}
コード例 #16
0
ファイル: subsample_image.c プロジェクト: AquaSoftGmbH/mjpeg
/*
 * Cross-check the AltiVec subsampler against the reference
 * implementation: run the AltiVec version, checksum the 2*2 and 4*4
 * subsampled planes, re-run with the reference version and compare
 * checksums.  On a mismatch the saved AltiVec output is diffed
 * pixel-by-pixel via imgcmp().  Exits if scratch buffers cannot be
 * allocated.
 */
void subsample_image_altivec_verify(SUBSAMPLE_IMAGE_PDECL)
{
    int width, height;
    unsigned long checksum44_1, checksum44_2;
    unsigned long checksum22_1, checksum22_2;
    unsigned char *cpy22, *cpy44;

    /* Geometry is recovered from the plane layout: sub22_image starts
       right after the full image. */
    width = rowstride;
    height = (unsigned long)(sub22_image - image) / rowstride;

    cpy22 = (unsigned char*)malloc((width/2) * (height/2));
    cpy44 = (unsigned char*)malloc((width/4) * (height/4));
    if (cpy22 == NULL || cpy44 == NULL)
	mjpeg_error_exit1("subsample_image: malloc failed");

    subsample_image_altivec(SUBSAMPLE_IMAGE_ARGS);
    checksum22_1 = checksum(sub22_image, width/2, height/2, rowstride/2);
    checksum44_1 = checksum(sub44_image, width/4, height/4, rowstride/4);

    /* copy data for imgcmp */
    imgcpy(cpy22, sub22_image, width/2, height/2, rowstride/2);
    imgcpy(cpy44, sub44_image, width/4, height/4, rowstride/4);

    ALTIVEC_TEST_WITH(subsample_image)(SUBSAMPLE_IMAGE_ARGS);
    checksum22_2 = checksum(sub22_image, width/2, height/2, rowstride/2);
    checksum44_2 = checksum(sub44_image, width/4, height/4, rowstride/4);

    if (checksum22_1 != checksum22_2 || checksum44_1 != checksum44_2) {
	mjpeg_debug("subsample_image(" SUBSAMPLE_IMAGE_PFMT ")",
	    SUBSAMPLE_IMAGE_ARGS);
	/* BUGFIX: the checksums are unsigned long, so they must be
	 * printed with %lu; %d was undefined behavior on LP64. */
	if (checksum22_1 != checksum22_2)
	    mjpeg_debug("subsample_image: %s checksums differ %lu != %lu",
		"2*2", checksum22_1, checksum22_2);
	if (checksum44_1 != checksum44_2)
	    mjpeg_debug("subsample_image: %s checksums differ %lu != %lu",
		"4*4", checksum44_1, checksum44_2);

	imgcmp("2*2", cpy22, sub22_image, width/2, height/2, rowstride/2);
	imgcmp("4*4", cpy44, sub44_image, width/4, height/4, rowstride/4);
    }

    free(cpy22);
    free(cpy44);
}
コード例 #17
0
/*
 * libpng read-transform callback: demultiplex one decoded PNG row
 * into the three global component planes raw0/raw1/raw2.  Grayscale
 * rows are replicated into all three planes; RGB rows are split per
 * channel.  Any other color type aborts the program.
 */
void png_separation(png_structp png_ptr, png_row_infop row_info, png_bytep data)
{
  int i;
  int row_nr = png_ptr->row_number; /* NOTE: reads libpng's internal row counter directly */
  int width = row_info->width;
  int base = row_nr * sh_param->new_width;  /* plane offset of this row */

  if (row_info->color_type == PNG_COLOR_TYPE_GRAY)
    {
      /* One channel: copy the sample into every plane. */
      for (i = 0; i < width; i++)
        {
          raw0[base + i] = data[i];
          raw1[base + i] = data[i];
          raw2[base + i] = data[i];
        }
      return;
    }

  if (row_info->color_type == PNG_COLOR_TYPE_RGB)
    {
      /* Three interleaved channels: split into separate planes. */
      for (i = 0; i < width; i++)
        {
          raw0[base + i] = data[i*3];
          raw1[base + i] = data[i*3 + 1];
          raw2[base + i] = data[i*3 + 2];
        }
      return;
    }

  mjpeg_error_exit1("mpegz: UNKNOWN COLOR FORMAT %d in PNG transformation !\n", row_info->color_type);
}
コード例 #18
0
ファイル: ppmtoy4m.c プロジェクト: AquaSoftGmbH/mjpeg
/*
 * Parse the ppmtoy4m command line into *cl.
 * cl is first filled with defaults (NTSC frame rate, square pixels,
 * progressive, RGB packing, stdin input) which individual options
 * then override.  An optional trailing non-option argument names the
 * input file.  On any bad option the program prints a hint and exits.
 */
static
void parse_args(cl_info_t *cl, int argc, char **argv)
{
  int c;

  /* defaults -- overridden by options below */
  cl->offset = 0;
  cl->framecount = 0;
  cl->aspect = y4m_sar_SQUARE;
  cl->interlace = Y4M_ILACE_NONE;
  cl->framerate = y4m_fps_NTSC;
  cl->interleave = 0;
  cl->repeatlast = 0;
  cl->ss_mode = DEFAULT_CHROMA_MODE;
  cl->verbosity = 1;
  cl->bgr = 0;
  cl->fdin = fileno(stdin); /* default to stdin */

  while ((c = getopt(argc, argv, "BA:F:I:Lo:n:rS:v:h")) != -1) {
    switch (c) {
    case 'A':  /* pixel aspect ratio, e.g. "10:11" */
      if (y4m_parse_ratio(&(cl->aspect), optarg) != Y4M_OK) {
	mjpeg_error("Could not parse ratio:  '%s'", optarg);
	goto ERROR_EXIT;
      }
      break;
    case 'F':  /* frame rate as a ratio, e.g. "30000:1001" */
      if (y4m_parse_ratio(&(cl->framerate), optarg) != Y4M_OK) {
	mjpeg_error("Could not parse ratio:  '%s'", optarg);
	goto ERROR_EXIT;
      }
      break;
    case 'I':  /* interlacing mode: p/t/b */
      switch (optarg[0]) {
      case 'p':  cl->interlace = Y4M_ILACE_NONE;  break;
      case 't':  cl->interlace = Y4M_ILACE_TOP_FIRST;  break;
      case 'b':  cl->interlace = Y4M_ILACE_BOTTOM_FIRST;  break;
      default:
	mjpeg_error("Unknown value for interlace: '%c'", optarg[0]);
	goto ERROR_EXIT;
	break;
      }
      break;
    case 'L':  /* fields are interleaved in one PPM */
      cl->interleave = 1;
      break;
    case 'B':  /* input pixels are packed B,G,R instead of R,G,B */
      cl->bgr = 1;
      break;
    case 'o':  /* number of input frames to skip */
      if ((cl->offset = atoi(optarg)) < 0)
	mjpeg_error_exit1("Offset must be >= 0:  '%s'", optarg);
      break;
    case 'n':  /* number of frames to emit; 0 == all */
      if ((cl->framecount = atoi(optarg)) < 0)
	mjpeg_error_exit1("Frame count must be >= 0:  '%s'", optarg);
      break;
    case 'r':  /* repeat the last frame when input runs out */
      cl->repeatlast = 1;
      break;
    case 'S':  /* chroma subsampling keyword, e.g. "420jpeg" */
      cl->ss_mode = y4m_chroma_parse_keyword(optarg);
      if (cl->ss_mode == Y4M_UNKNOWN) {
	mjpeg_error("Unknown subsampling mode option:  %s", optarg);
	goto ERROR_EXIT;
      } else if (!chroma_sub_implemented(cl->ss_mode)) {
	mjpeg_error("Unsupported subsampling mode option:  %s", optarg);
	goto ERROR_EXIT;
      }
      break;
    case 'v':  /* verbosity 0..2 (note: bad values only warn) */
      cl->verbosity = atoi(optarg);
      if ((cl->verbosity < 0) || (cl->verbosity > 2))
	mjpeg_error("Verbosity must be 0, 1, or 2:  '%s'", optarg);
      break;
    case 'h':
      usage(argv[0]);
      exit(0);
      break;
    case '?':
    default:
      goto ERROR_EXIT;
      break;
    }
  }
  /* optional remaining argument is a filename */
  if (optind == (argc - 1)) {
    if ((cl->fdin = open(argv[optind], O_RDONLY | O_BINARY)) == -1)
      mjpeg_error_exit1("Failed to open '%s':  %s",
			argv[optind], strerror(errno));
  } else if (optind != argc) 
    goto ERROR_EXIT;


  mjpeg_default_handler_verbosity(cl->verbosity);

  /* echo the effective settings */
  mjpeg_info("Command-line Parameters:");
  mjpeg_info("             framerate:  %d:%d",
	     cl->framerate.n, cl->framerate.d);
  mjpeg_info("    pixel aspect ratio:  %d:%d",
	     cl->aspect.n, cl->aspect.d);
  mjpeg_info("         pixel packing:  %s",
	     cl->bgr?"BGR":"RGB");
  mjpeg_info("             interlace:  %s%s",
	     mpeg_interlace_code_definition(cl->interlace),
	     (cl->interlace == Y4M_ILACE_NONE) ? "" :
	     (cl->interleave) ? " (interleaved PPM input)" :
	     " (field-sequential PPM input)");
  mjpeg_info("        starting frame:  %d", cl->offset);
  if (cl->framecount == 0)
    mjpeg_info("           # of frames:  all%s",
	       (cl->repeatlast) ? ", repeat last frame forever" :
	       ", until input exhausted");
  else
    mjpeg_info("           # of frames:  %d%s",
	       cl->framecount,
	       (cl->repeatlast) ? ", repeat last frame until done" :
	       ", or until input exhausted");
  mjpeg_info("    chroma subsampling:  %s",
	     y4m_chroma_description(cl->ss_mode));

  /* DONE! */
  return;

 ERROR_EXIT:
  mjpeg_error("For usage hints, use option '-h'.  Please take a hint.");
  exit(1);

}
コード例 #19
0
ファイル: aunitbuffer.hpp プロジェクト: AquaSoftGmbH/mjpeg
	/// Append a copy of the given access unit to the buffer.
	/// Exits with an internal error if the sanity limit on buffered
	/// AUs would be exceeded.
	void Append( AUnit &rec )
	{
		if( BUF_SIZE_SANITY <= buf.size() )
			mjpeg_error_exit1( "INTERNAL ERROR: AU buffer overflow" );
		AUnit *copy = new AUnit( rec );
		buf.push_back( copy );
	}
コード例 #20
0
ファイル: y4mhist.c プロジェクト: tufei/y4m.js
/*
 * y4mhistogram main: read a YUV4MPEG stream from stdin and accumulate
 * per-value histograms of the Y', U and V planes.  With SDLgfx
 * support the histograms (and optionally a vectorscope, -s N) are
 * drawn live in an SDL window; otherwise (or with -t) the counts are
 * dumped as text at the end.
 */
int
main(int argc, char **argv)
	{
	int	i, fdin, ss_v, ss_h, chroma_ss, textout;
	int 	do_vectorscope;
	int	pwidth, pheight; /* Needed for the vectorscope */
	int	plane0_l, plane1_l, plane2_l;
	u_char	*yuv[3], *cp;
#ifdef	HAVE_SDLgfx
	int	j;
	int temp_x, temp_y;
	u_char	*cpx, *cpy;
#endif
	y4m_stream_info_t istream;
	y4m_frame_info_t iframe;

	do_vectorscope = 0;
	scalepercent = 0;

	/* text output is the default only when SDL is unavailable */
#ifdef	HAVE_SDLgfx
	textout = 0;
#else
	textout = 1;
#endif

	while	((i = getopt(argc, argv, "tps:")) != EOF)
		{
		switch	(i)
			{
			case	't':	/* force text output */
				textout = 1;
				break;
			case	'p':	/* scale display as percentages */
				scalepercent = 1;
				break;
			case	's':	/* vectorscope, sampling every Nth line */
				do_vectorscope = atoi(optarg);
				break;
			default:
				usage();
			}
		}

#ifdef HAVE_SDLgfx
	if ( (do_vectorscope < 0) || (do_vectorscope >16) )
		usage();

	/* Initialize SDL */
	desired_bpp = 8; 
	video_flags = 0;
	video_flags |= SDL_DOUBLEBUF;
	number_of_frames = 1;

	memset(fy_stats, '\0', sizeof (fy_stats));
	memset(ly_stats, '\0', sizeof (ly_stats));

        if	( SDL_Init(SDL_INIT_VIDEO) < 0 ) 
                mjpeg_error_exit1("Couldn't initialize SDL:%s",SDL_GetError());
        atexit(SDL_Quit);                       /* Clean up on exit */
        /* Initialize the display; the vectorscope needs a wider window */
	if (do_vectorscope == 0)
	        screen = SDL_SetVideoMode(width,heigth,desired_bpp,video_flags);
	else
	        screen=SDL_SetVideoMode(width_v,heigth,desired_bpp,video_flags);

        if	(screen == NULL)
                mjpeg_error_exit1("Couldn't set %dx%dx%d video mode: %s",
                                width, heigth, desired_bpp, SDL_GetError());

	SDL_WM_SetCaption("y4mhistogram", "y4mhistogram");

	y4m_init_area(screen); /* Here we draw the basic layout */
#endif /* HAVE_SDLgfx */

	fdin = fileno(stdin);

	y4m_accept_extensions(1);

	y4m_init_stream_info(&istream);
	y4m_init_frame_info(&iframe);

	if	(y4m_read_stream_header(fdin, &istream) != Y4M_OK)
		mjpeg_error_exit1("stream header error");

        if      (y4m_si_get_plane_count(&istream) != 3)
                mjpeg_error_exit1("Only 3 plane formats supported");

	pwidth = y4m_si_get_width(&istream);
	pheight = y4m_si_get_height(&istream);
	chroma_ss = y4m_si_get_chroma(&istream);
	ss_h = y4m_chroma_ss_x_ratio(chroma_ss).d;
	ss_v = y4m_chroma_ss_y_ratio(chroma_ss).d;


	plane0_l = y4m_si_get_plane_length(&istream, 0);
	plane1_l = y4m_si_get_plane_length(&istream, 1);
	plane2_l = y4m_si_get_plane_length(&istream, 2);

	yuv[0] = malloc(plane0_l);
	if	(yuv[0] == NULL)
		mjpeg_error_exit1("malloc(%d) plane 0", plane0_l);
	yuv[1] = malloc(plane1_l);
	if	(yuv[1] == NULL)
		mjpeg_error_exit1(" malloc(%d) plane 1", plane1_l);
	yuv[2] = malloc(plane2_l);
	if	(yuv[2] == NULL)
		mjpeg_error_exit1(" malloc(%d) plane 2\n", plane2_l);

	/* per-frame: count every sample value of each plane */
	while	(y4m_read_frame(fdin,&istream,&iframe,yuv) == Y4M_OK)
		{
		for	(i = 0, cp = yuv[0]; i < plane0_l; i++, cp++)
			y_stats[*cp]++; /* Y' */
		for	(i = 0, cp = yuv[1]; i < plane1_l; i++, cp++)
			u_stats[*cp]++;	/* U */
		for	(i = 0, cp = yuv[2]; i < plane2_l; i++, cp++)
			v_stats[*cp]++;	/* V */
#ifdef HAVE_SDLgfx
			
		if (do_vectorscope >= 1 )
		{
		
		for (i=0; i<260; i++) /* Resetting the vectorfield */
			for (j=0;j<260;j++)
				vectorfield[i][j]=0;

		cpx = yuv[1];
		cpy = yuv[2];

		for (i=0; i < (pheight/ss_h); i++)
			{
			for (j = 0; j < (pwidth/ss_v); j++)
				{
					cpx++;
					cpy++;

					/* Have no idea why I have to multiply it with that values
					   But than the vectorsscope works correct. If someone has
						a explantion or better fix tell me. Bernhard */
					temp_x = round( 128+ ((*cpx-128) * 0.7857) );
					temp_y = round( 128+ ((*cpy-128) * 1.1143) );
					/* NOTE(review): temp_y*-1 yields a NEGATIVE second index
					   (temp_y is around 128) -- this looks out of bounds;
					   confirm the intended vertical flip, e.g. [256-temp_y]. */
					vectorfield[temp_x][temp_y*-1]=1;
				}

				/* Here we got to the n'th next line if needed */
				i   = i + (do_vectorscope-1);
				cpy = cpy + (pwidth/ss_v) * (do_vectorscope-1);
				cpx = cpx + (pwidth/ss_v) * (do_vectorscope-1);
			}

		}
		make_stat(); /* showing the sats */

		SDL_UpdateRect(screen,0,0,0,0); /* updating all */

		/* Events for SDL */
		HandleEvent();
#endif
		}
	y4m_fini_frame_info(&iframe);
	y4m_fini_stream_info(&istream);

	/* NOTE(review): the loops below stop at 254, so histogram bin 255
	   is never printed -- confirm whether "< 256" was intended. */
	if	(textout)
		{
		for	(i = 0; i < 255; i++)
			printf("Y %d %lld\n", i, y_stats[i]);
		for	(i = 0; i < 255; i++)
			printf("U %d %lld\n", i, u_stats[i]);
		for	(i = 0; i < 255; i++)
			printf("V %d %lld\n", i, v_stats[i]);
		}
	exit(0);
	}
コード例 #21
0
ファイル: iquant_intra.c プロジェクト: jlehtine/yuvmotionfps
/*
 * AltiVec MPEG-1 intra-block inverse quantization: dequantize the 64
 * coefficients in src into dst using the intra quantizer matrix and
 * mquant, with the MPEG-1 "oddification" (subtract 1 from even
 * non-zero magnitudes), saturation to [-2048, 2047], and the DC
 * coefficient handled separately from src[0] with dc_prec.
 * Processes 8 int16 coefficients per vector iteration.
 */
void iquant_intra_m1_altivec(IQUANT_INTRA_PDECL)
{
    int i;
    vector signed short vsrc;
    uint16_t *qmat;
    vector unsigned short vqmat;
    vector unsigned short vmquant;
    vector bool short eqzero, ltzero;
    vector signed short val, t0;
    vector signed short zero, one;
    vector unsigned int four;
    vector signed short min, max;
    int offset, offset2;
    int16_t dst0;
    union {
	vector unsigned short vu16;
	unsigned short mquant;
	vector signed int vs32;
	struct {
	    signed int pad[3];
	    signed int sum;
	} s;
    } vu;
#ifdef ALTIVEC_DST
    DataStreamControl dsc;
#endif

#ifdef ALTIVEC_VERIFY /* {{{ */
    /* alignment/range preconditions, checked only in verify builds */
    if (NOT_VECTOR_ALIGNED(wsp->intra_q_mat))
	mjpeg_error_exit1("iquant_intra_m1: wsp->intra_q_mat %% 16 != 0, (%d)",
	    wsp->intra_q_mat);

    if (NOT_VECTOR_ALIGNED(src))
	mjpeg_error_exit1("iquant_intra_m1: src %% 16 != 0, (%d)", src);

    if (NOT_VECTOR_ALIGNED(dst))
	mjpeg_error_exit1("iquant_intra_m1: dst %% 16 != 0, (%d)", dst);

    for (i = 0; i < 64; i++)
	if (src[i] < -256 || src[i] > 255)
	    /* NOTE(review): message says "iquant_intra_m2" but this is
	       the m1 routine -- looks like a copy-paste slip. */
	    mjpeg_error_exit1("iquant_intra_m2: -256 > src[%i] > 255, (%d)",
		i, src[i]);
#endif /* }}} */

    AMBER_START;

    /* DC coefficient is computed separately and stored last */
    dst0 = src[0] << (3 - dc_prec);

    qmat = (uint16_t*)wsp->intra_q_mat;

#ifdef ALTIVEC_DST
    /* prefetch the coefficient and quantizer streams */
    dsc.control = DATA_STREAM_CONTROL(64/8,1,0);
    vec_dst(src, dsc.control, 0);
    vec_dst(qmat, dsc.control, 1);
#endif

    /* vmquant = (vector unsigned short)(mquant); */
    vu.mquant = (unsigned short)mquant;
    vmquant = vec_splat(vu.vu16, 0);

    zero = vec_splat_s16(0);
    one = vec_splat_s16(1);
    four = vec_splat_u32(4);
    /* max = (2047); min = (-2048); {{{ */
    vu8(max) = vec_splat_u8(0x7);
    t0 = vec_splat_s16(-1); /* 0xffff */
    vu8(max) = vec_mergeh(vu8(max), vu8(t0)); /* 0x07ff == 2047 */
    min = vec_sub(t0, max);
    /* }}} */
    offset = 0;

#if 1
    /* software-pipelined variant: loads for the next iteration are
       issued in the middle of the current one; the final group is
       handled after the loop. */
    vsrc = vec_ld(offset, (signed short*)src);
    vqmat = vec_ld(offset, (unsigned short*)qmat);
    i = (64/8) - 1;
    do {
	/* intra_q[i] * mquant */
	vu16(vqmat) = vec_mulo(vu8(vqmat), vu8(vmquant));

	/* save sign */
	ltzero = vec_cmplt(vsrc, zero);
	eqzero = vec_cmpeq(vsrc, zero);

	/* val = abs(src) */
	t0 = vec_sub(zero, vsrc);
	val = vec_max(t0, vsrc);

	/* val = (src * quant) >> 4 */
	vs32(t0) = vec_mule(val, vs16(vqmat));
	vs32(val) = vec_mulo(val, vs16(vqmat));
	vs32(t0) = vec_sra(vs32(t0), four);
	vs16(t0) = vec_pack(vs32(t0), vs32(t0));
	vs32(val) = vec_sra(vs32(val), four);
	vs16(val) = vec_pack(vs32(val), vs32(val));
	val = vec_mergeh(vs16(t0), vs16(val));

	offset2 = offset;
	offset += 8*sizeof(int16_t);
	vsrc = vec_ld(offset, (signed short*)src);
	vqmat = vec_ld(offset, (unsigned short*)qmat);

	/* val = val - 1&~(val|val==0) */
	t0 = vec_or(val, eqzero);
	t0 = vec_andc(one, t0);
	val = vec_sub(val, t0);

	/* restore sign */
	t0 = vec_sub(zero, val);
	val = vec_sel(val, t0, ltzero);

	/* val = (val > 2047) ? ((val < -2048) ? -2048 : val); */
	val = vec_min(val, max);
	val = vec_max(val, min);

	vec_st(val, offset2, dst);
    } while (--i);
    /* epilogue: same computation for the final 8 coefficients */
    /* intra_q[i] * mquant */
    vu16(vqmat) = vec_mulo(vu8(vqmat), vu8(vmquant));

    /* save sign */
    ltzero = vec_cmplt(vsrc, zero);
    eqzero = vec_cmpeq(vsrc, zero);

    /* val = abs(src) */
    t0 = vec_sub(zero, vsrc);
    val = vec_max(t0, vsrc);

    /* val = (src * quant) >> 4 */
    vs32(t0) = vec_mule(val, vs16(vqmat));
    vs32(val) = vec_mulo(val, vs16(vqmat));
    vs32(t0) = vec_sra(vs32(t0), four);
    vs16(t0) = vec_pack(vs32(t0), vs32(t0));
    vs32(val) = vec_sra(vs32(val), four);
    vs16(val) = vec_pack(vs32(val), vs32(val));
    val = vec_mergeh(vs16(t0), vs16(val));

    /* val = val - 1&~(val|val==0) */
    t0 = vec_or(val, eqzero);
    t0 = vec_andc(one, t0);
    val = vec_sub(val, t0);

    /* restore sign */
    t0 = vec_sub(zero, val);
    val = vec_sel(val, t0, ltzero);

    /* val = (val > 2047) ? ((val < -2048) ? -2048 : val); */
    val = vec_min(val, max);
    val = vec_max(val, min);

    vec_st(val, offset, dst);
#else
    /* {{{ straightforward (non-pipelined) reference variant */
    i = (64/8);
    do {
	vsrc = vec_ld(offset, (signed short*)src);
	vqmat = vec_ld(offset, (unsigned short*)qmat);

	/* intra_q[i] * mquant */
	vu16(vqmat) = vec_mulo(vu8(vqmat), vu8(vmquant));

	/* save sign */
	ltzero = vec_cmplt(vsrc, zero);
	eqzero = vec_cmpeq(vsrc, zero);

	/* val = abs(src) */
	t0 = vec_sub(zero, vsrc);
	val = vec_max(t0, vsrc);

	/* val = (src * quant) >> 4 */
	vs32(t0) = vec_mule(val, vs16(vqmat));
	vs32(val) = vec_mulo(val, vs16(vqmat));
	vs32(t0) = vec_sra(vs32(t0), four);
	vs16(t0) = vec_pack(vs32(t0), vs32(t0));
	vs32(val) = vec_sra(vs32(val), four);
	vs16(val) = vec_pack(vs32(val), vs32(val));
	val = vec_mergeh(vs16(t0), vs16(val));

	/* val = val - 1&~(val|val==0) */
	t0 = vec_or(val, eqzero);
	t0 = vec_andc(one, t0);
	val = vec_sub(val, t0);

	/* restore sign */
	t0 = vec_sub(zero, val);
	val = vec_sel(val, t0, ltzero);

	/* val = (val > 2047) ? ((val < -2048) ? -2048 : val); */
	val = vec_min(val, max);
	val = vec_max(val, min);

	vec_st(val, offset, dst);

	offset += 8*sizeof(int16_t);
    } while (--i);
    /* }}} */
#endif

    /* overwrite slot 0 with the separately-computed DC value */
    dst[0] = dst0;

    AMBER_STOP;
}
コード例 #22
0
/*
 * Initialize a stills stream for VCD or SVCD multiplexing: scan the
 * first sequence header, pick the stream id and decoder buffer size
 * required by the target format/resolution, and register them with
 * the underlying MuxStream.  Only VCD and SVCD stills formats are
 * supported; anything else is fatal.
 */
void StillsStream::Init ( )
{
	int stream_id = -1;      /* chosen MPEG stream id (local, passed to MuxStream::Init) */
	int buffer_size = -1;    /* decoder buffer size in bytes */

	SetBufSize( 4*1024*1024 );
	ScanFirstSeqHeader();

	mjpeg_debug( "Stills: Video buffer suggestion ignored!" );
	switch( muxinto.mux_format )
	{
	case  MPEG_FORMAT_VCD_STILL :
		if( horizontal_size > 352 )
		{
			stream_id = VIDEO_STR_0+2 ;
			buffer_size = vbv_buffer_size*2048;
			/* NOTE(review): buffer_size here is in BYTES though the
			   message says "KB each" -- confirm whether /1024 was
			   intended. */
			mjpeg_info( "Stills Stream %02x: high-resolution VCD stills %d KB each", 
						stream_id,
						buffer_size );
			if( buffer_size < 46*1024 )
				mjpeg_error_exit1( "I Can't multiplex high-res stills smaller than normal res stills - sorry!");

		}
		else
		{
			stream_id = VIDEO_STR_0+1 ;
			buffer_size = 46*1024;
			mjpeg_info( "Stills Stream %02x: normal VCD stills", stream_id );
		}
		break;
	case MPEG_FORMAT_SVCD_STILL :
		if( horizontal_size > 480 )
		{
			stream_id = VIDEO_STR_0+1;
			buffer_size = 230*1024;
			mjpeg_info( "Stills Stream %02x: high-resolution SVCD stills.", 
						stream_id );
		}
		else
		{
			stream_id = VIDEO_STR_0+1 ;
			buffer_size = 230*1024;
			mjpeg_info( "Stills Stream %02x: normal-resolution SVCD stills.", stream_id );
		}
		break;
	default:
		mjpeg_error_exit1( "Only SVCD and VCD Still currently supported");
	}


	MuxStream::Init( stream_id,
					 1,  // Buffer scale
					 buffer_size,
					 0,  // Zero stuffing
					 muxinto.buffers_in_video,
					 muxinto.always_buffers_in_video);
	
	/* Skip to the end of the 1st AU (*2nd* Picture start!)
	*/
	AU_hdr = SEQUENCE_HEADER;
	AU_pict_data = 0;
	AU_start = 0;

    OutputSeqhdrInfo();

}
コード例 #23
0
ファイル: add_pred.c プロジェクト: AquaSoftGmbH/mjpeg
/*
 * add prediction and prediction error, saturate to 0...255
 * pred % 8 == 0
 * cur % 8 == 0
 * lx % 16 == 0
 * blk % 16 == 0
 */
/*
 * Add the motion-compensation prediction (pred) to the IDCT prediction
 * error (blk), saturating the result to 0..255, and store it into the
 * current picture (cur).  Processes an 8x8 block, two rows (A and B)
 * per loop-unrolled step.  Alignment preconditions (pred%8, cur%8,
 * lx%16, blk%16) are documented above and enforced under ALTIVEC_VERIFY.
 */
void add_pred_altivec(ADD_PRED_PDECL)
{
#ifdef ALTIVEC_DST
    unsigned int dst;
#endif
    uint8_t *pCA, *pCB, *pPA, *pPB;
    int16_t *pBA, *pBB;
    vector unsigned char zero;
    vector unsigned char predA, predB, curA, curB;
    vector signed short blkA, blkB;


#ifdef ALTIVEC_VERIFY
    if (NOT_VECTOR_ALIGNED(lx))
	mjpeg_error_exit1("add_pred: lx %% 16 != 0, (%d)", lx);

    if (NOT_VECTOR_ALIGNED(blk))
	mjpeg_error_exit1("add_pred: blk %% 16 != 0, (%d)", blk);

#ifdef ALTIVEC_DST
    /* BUGFIX: the original test read (lx & (~0xffff) != 0).  C precedence
     * makes != bind tighter than &, so it evaluated as (lx & 1) and never
     * checked what it meant to.  We want to reject any lx that does not
     * fit in the 16-bit stride field of the vec_dst control word.
     */
    if ((lx & ~0xffff) != 0)
	mjpeg_error_exit1("add_pred: lx=%d > vec_dst range", lx);
#endif

    if (((unsigned long)pred & 0xf) != ((unsigned long)cur & 0xf))
	mjpeg_error_exit1("add_pred: (pred(0x%X) %% 16) != (cur(0x%X) %% 16)",
		pred, cur);
    if ((((unsigned long)pred) & 0x7) != 0)
	mjpeg_error_exit1("add_pred: pred %% 8 != 0, (0x%X)", pred);
    if ((((unsigned long)cur) & 0x7) != 0)
	mjpeg_error_exit1("add_pred: cur %% 8 != 0, (0x%X)", cur);
#endif

/* MACROS expand differently depending on input */
#define ABBA(symbol,ab)		_ABBA(ABBA_##ab,symbol) /* {{{ */
#define _ABBA(abba_ab,symbol)	abba_ab(symbol)
#define ABBA_A(symbol)		symbol##B
#define ABBA_B(symbol)		symbol##A
/* }}} */
#define HLLH(symbol,hl)		_HLLH(HLLH_##hl,symbol) /* {{{ */
#define _HLLH(hllh_hl,symbol)	hllh_hl(symbol)
#define HLLH_h(symbol)		symbol##l
#define HLLH_l(symbol)		symbol##h
/* }}} */
#define PACKSU(hl,st,ld)	_PACKSU(PACKSU_##hl,st,ld) /* {{{ */
#define _PACKSU(psu,st,ld)	psu(st,ld)
#define PACKSU_h(st,ld)		vec_packsu(st,ld)
#define PACKSU_l(st,ld)		vec_packsu(ld,st)
/* }}} */


/* Widen pred/cur bytes to shorts, add the block, clamp at zero, then
 * saturating-pack back to unsigned bytes and store. */
#define	PERFORM_ITERATION(hl,ab,iter) /* iter {{{ */                         \
	pred##ab = vec_merge##hl(zero, pred##ab);                            \
	cur##ab = HLLH(vec_merge,hl)(zero, cur##ab);                         \
	blk##ab = vec_add(blk##ab, vs16(pred##ab));                          \
	blk##ab = vec_max(blk##ab, vs16(zero));                              \
	cur##ab = PACKSU(hl, vu16(blk##ab), vu16(cur##ab));                  \
	vec_st(cur##ab, 0, pC##ab);                                          \
	/* }}} */

/* Advance the A/B pointer pair by one row (or 8 coefficients) and
 * preload the vectors for the next iteration. */
#define PREPARE_ITERATION(hl,ab,iter) /* iter {{{ */                         \
	pP##ab = ABBA(pP,ab) + lx;                                           \
	pC##ab = ABBA(pC,ab) + lx;                                           \
	pB##ab = ABBA(pB,ab) + 8;                                            \
	pred##ab = vec_ld(0, pP##ab);                                        \
	cur##ab = vec_ld(0, pC##ab);                                         \
	blk##ab = vec_ld(0, pB##ab);                                         \
	/* }}} */

#define NO_RESCHEDULE	asm volatile ("") 

    AMBER_START;

    pPA = pred;
    pCA = cur;
    pBA = blk;

#ifdef ALTIVEC_DST
    dst = 0x01080000 | lx;
    vec_dst(pPA, dst, 0);
    vec_dst(pCA, dst, 1);
    dst = 0x01080010;
    vec_dst(pBA, dst, 2);
#endif

    /* Interleave the initial loads with pointer setup; NO_RESCHEDULE
     * keeps the compiler from reordering them. */
    predA = vec_ld(0, pPA);
    curA = vec_ld(0, pCA);  NO_RESCHEDULE;
    pPB = pPA + lx;         NO_RESCHEDULE;
    blkA = vec_ld(0, pBA);  NO_RESCHEDULE;
    pCB = pCA + lx;         NO_RESCHEDULE;
    predB = vec_ld(0, pPB); NO_RESCHEDULE;
    pBB = pBA + 8;          NO_RESCHEDULE;
    curB = vec_ld(0, pCB);  NO_RESCHEDULE;
    zero = vec_splat_u8(0); NO_RESCHEDULE;
    blkB = vec_ld(0, pBB);

    /* The h/l variants differ only in which half of the 16-byte aligned
     * load holds the 8 wanted bytes. */
    if (VECTOR_ALIGNED(pPA)) {
	PERFORM_ITERATION(h,A,0);
	PREPARE_ITERATION(h,A,2);   /* prepare next A iteration */
	PERFORM_ITERATION(h,B,1);
	PREPARE_ITERATION(h,B,3);   /* prepare next B iteration */
	PERFORM_ITERATION(h,A,2);
	PREPARE_ITERATION(h,A,4);
	PERFORM_ITERATION(h,B,3);
	PREPARE_ITERATION(h,B,5);
	PERFORM_ITERATION(h,A,4);
	PREPARE_ITERATION(h,A,6);
	PERFORM_ITERATION(h,B,5);
	PREPARE_ITERATION(h,B,7);
	PERFORM_ITERATION(h,A,6);
	PERFORM_ITERATION(h,B,7);
    } else {
	PERFORM_ITERATION(l,A,0);
	PREPARE_ITERATION(l,A,2);   /* prepare next A iteration */
	PERFORM_ITERATION(l,B,1);
	PREPARE_ITERATION(l,B,3);   /* prepare next B iteration */
	PERFORM_ITERATION(l,A,2);
	PREPARE_ITERATION(l,A,4);
	PERFORM_ITERATION(l,B,3);
	PREPARE_ITERATION(l,B,5);
	PERFORM_ITERATION(l,A,4);
	PREPARE_ITERATION(l,A,6);
	PERFORM_ITERATION(l,B,5);
	PREPARE_ITERATION(l,B,7);
	PERFORM_ITERATION(l,A,6);
	PERFORM_ITERATION(l,B,7);
    }

#ifdef ALTIVEC_DST
    vec_dssall();
#endif

    AMBER_STOP;
}
コード例 #24
0
ファイル: main.c プロジェクト: jlehtine/yuvmotionfps
int
main (int argc, char *argv[])
{
  extern char *optarg;
  int cpucap = cpu_accel ();
  char c;
  int fd_in = 0;
  int fd_out = 1;
  int errno = 0;
  int have_framerate = 0;
  int force_interlacing = 0;
  y4m_frame_info_t iframeinfo;
  y4m_stream_info_t istreaminfo;
  y4m_frame_info_t oframeinfo;
  y4m_stream_info_t ostreaminfo;
  int output_frame_number = 0;
  int input_frame_number = 0;
  y4m_ratio_t output_frame_rate, input_frame_rate, frame_rate_ratio;
  float ratio = 0;		// input/output, output should be > input )
  int scene_change;
  y4m_ratio_t ratio_percent_frame;
  float percent_threshold = 0.02;

/* percent_threshold is there to avoid interpolating frames when the output frame
 * is very close to an input frame
 */

  mjpeg_log (LOG_INFO, "-------------------------------------------------");
  mjpeg_log (LOG_INFO, "   Motion-Compensating-Frame-Rate-Converter     ");
  mjpeg_log (LOG_INFO, "-------------------------------------------------");

  while ((c = getopt (argc, argv, "hvb:p:r:t:s:f")) != -1)
    {
      switch (c)
	{
	case 'h':
	  {
	    mjpeg_log (LOG_INFO, "Usage ");
	    mjpeg_log (LOG_INFO, "-------------------------");
	    mjpeg_log (LOG_INFO, "  This program converts frame rates");
	    mjpeg_log (LOG_INFO,
		       "with a smart algorithm that estimates the motion of the elements");
	    mjpeg_log (LOG_INFO,
		       "to smooth the motion, rather than duplicating frames.");
	    mjpeg_log (LOG_INFO,
		       "  It's way smoother, but introduces a bit of blocking and/or");
	    mjpeg_log (LOG_INFO,
		       " maybe blurryness when things move too fast.");
	    mjpeg_log (LOG_INFO, " ");
	    mjpeg_log (LOG_INFO,
		       " -r Frame rate for the resulting stream (in X:Y fractional form)");
	    mjpeg_log (LOG_INFO,
		       " -b block size (default = 8, will be rounded to even number )");
	    mjpeg_log (LOG_INFO,
		       " -p search path radius (default = 8, do not use high values ~ > 20)");
	    mjpeg_log (LOG_INFO,
		       "-t frame approximation threshold (default=50, higher=better)");
	    mjpeg_log (LOG_INFO,
		       "-s scene change threshold (default=8, 0=disable scene change detection)");
	    mjpeg_log (LOG_INFO,
		       "-r Frame rate for the resulting stream (in X:Y fractional form)");
	    mjpeg_log (LOG_INFO,
		       " -f force processing interlaced input (don't know what it does)");

	    mjpeg_log (LOG_INFO, " -v verbose/debug");

	    exit (0);
	    break;
	  }
	case 'v':
	  {
	    verbose = 1;
	    break;
	  }
	case 'f':
	  {
	    force_interlacing = 1;
	    break;
	  }
	case 'b':
	  {
	    block_size = strtol (optarg, (char **) NULL, 10);
	    /* we only want even block sizes */
	    if (block_size % 1 != 0)
	      {
		block_size = block_size + 1;
		mjpeg_log (LOG_WARN, "Block size changed to %d", block_size);
	      }
	    else
	      mjpeg_log (LOG_INFO, "Block size: %d", block_size);
	    break;
	  }
	case 'p':
	  {
	    search_path_radius = strtol (optarg, (char **) NULL, 10);	/* safer atoi */
	    mjpeg_log (LOG_INFO, "Search radius %d", search_path_radius);

	    break;
	  }
	case 'r':
	  {
	    if (Y4M_OK != y4m_parse_ratio (&output_frame_rate, optarg))
	      mjpeg_error_exit1
		("Syntax for frame rate should be Numerator:Denominator");


	    mjpeg_log (LOG_INFO, "New Frame rate %d:%d",
		       output_frame_rate.n, output_frame_rate.d);
	    have_framerate = 1;
	    break;
	  }
	case 't':
	  {
	    percent_threshold = strtol (optarg, (char **) NULL, 10);
	    if ((percent_threshold > 1) && (percent_threshold <= 1024))
	      percent_threshold = 1.0 / percent_threshold;
	    else
	      mjpeg_error_exit1 ("Threshold should be between 2 and 1024");

	    mjpeg_log (LOG_INFO, "Approximation threshold %d",
		       (int) ((float) 1.0 / percent_threshold));
	    break;

	  }
	case 's':
	  {
	    scene_change_threshold = strtol (optarg, (char **) NULL, 10);
	    if (scene_change_threshold == 0)
	      mjpeg_log (LOG_INFO, "Scene change detection disabled");
	    else
	      mjpeg_log (LOG_INFO, "Scene change threshold: %d00 percent",
			 scene_change_threshold);
	    break;

	  }
	}
    }

  if (!have_framerate)
    {
      mjpeg_error_exit1
	("Please specify a frame rate; yuvmotionfps -h for more info");
    }

  /* initialize motion_library */
  init_motion_search ();

  /* initialize MMX transforms (fixme) */
  if ((cpucap & ACCEL_X86_MMXEXT) != 0 || (cpucap & ACCEL_X86_SSE) != 0)
    {
#if 0
      mjpeg_log (LOG_INFO,
		 "FIXME: could use MMX/SSE Block/Frame-Copy/Blend if I had one ;-)");
#endif
    }

  /* initialize stream-information */
  y4m_accept_extensions (1);
  y4m_init_stream_info (&istreaminfo);
  y4m_init_frame_info (&iframeinfo);
  y4m_init_stream_info (&ostreaminfo);
  y4m_init_frame_info (&oframeinfo);

  /* open input stream */
  if ((errno = y4m_read_stream_header (fd_in, &istreaminfo)) != Y4M_OK)
    {
      mjpeg_log (LOG_ERROR, "Couldn't read YUV4MPEG header: %s!",
		 y4m_strerr (errno));
      exit (1);
    }

  /* get format information */
  width = y4m_si_get_width (&istreaminfo);
  height = y4m_si_get_height (&istreaminfo);
  input_chroma_subsampling = y4m_si_get_chroma (&istreaminfo);
  mjpeg_log (LOG_INFO, "Y4M-Stream is %ix%i(%s)",
	     width,
	     height,
	     input_chroma_subsampling ==
	     Y4M_CHROMA_420JPEG ? "4:2:0 MPEG1" : input_chroma_subsampling
	     ==
	     Y4M_CHROMA_420MPEG2 ? "4:2:0 MPEG2" :
	     input_chroma_subsampling ==
	     Y4M_CHROMA_420PALDV ? "4:2:0 PAL-DV" :
	     input_chroma_subsampling ==
	     Y4M_CHROMA_444 ? "4:4:4" : input_chroma_subsampling ==
	     Y4M_CHROMA_422 ? "4:2:2" : input_chroma_subsampling ==
	     Y4M_CHROMA_411 ? "4:1:1 NTSC-DV" : input_chroma_subsampling
	     ==
	     Y4M_CHROMA_MONO ? "MONOCHROME" : input_chroma_subsampling ==
	     Y4M_CHROMA_444ALPHA ? "4:4:4:4 ALPHA" : "unknown");

  /* if chroma-subsampling isn't supported bail out ... */
  switch (input_chroma_subsampling)
    {
    case Y4M_CHROMA_420JPEG:
      break;
    case Y4M_CHROMA_420PALDV:
    case Y4M_CHROMA_420MPEG2:
    case Y4M_CHROMA_411:
      mjpeg_log (LOG_WARN,
		 "This chroma subsampling mode has not been thoroughly tested");
      break;
    default:

      mjpeg_error_exit1
	("Y4M-Stream is not 4:2:0. Other chroma-modes currently not allowed. Sorry.");
    }

  /* the output is progressive 4:2:0 MPEG 1 */
  y4m_si_set_interlace (&ostreaminfo, Y4M_ILACE_NONE);
  y4m_si_set_chroma (&ostreaminfo, Y4M_CHROMA_420JPEG);
  y4m_si_set_width (&ostreaminfo, width);
  y4m_si_set_height (&ostreaminfo, height);
  y4m_si_set_sampleaspect (&ostreaminfo,
			   y4m_si_get_sampleaspect (&istreaminfo));

  input_frame_rate = y4m_si_get_framerate (&istreaminfo);

  y4m_si_set_framerate (&ostreaminfo, output_frame_rate);

  if (width % block_size != 0)
    {
      mjpeg_log (LOG_WARN,
		 "Warning, stream width(%d) is not a multiple of block_size (%d)",
		 width, block_size);
      mjpeg_log (LOG_WARN,
		 "The right side of the image might not be what you want");
    }
  if (height % block_size != 0)
    {
      mjpeg_log (LOG_WARN,
		 "Warning, stream height(%d) is not a multiple of block_size (%d)",
		 height, block_size);
      mjpeg_log (LOG_WARN,
		 "The lower side of the image might not be what you want");
    }



  /* Calculate the different ratios:
   * ratio is (input framerate / output framerate)
   * ratio_percent_frame is the fractional representation of percent frame
   */
  frame_rate_ratio.n = input_frame_rate.n * output_frame_rate.d;
  frame_rate_ratio.d = input_frame_rate.d * output_frame_rate.n;
  y4m_ratio_reduce (&frame_rate_ratio);
  ratio = (float) frame_rate_ratio.n / frame_rate_ratio.d;

  ratio_percent_frame.d = 1;
  ratio_percent_frame.n = 0;

  if (ratio == 0)
    mjpeg_error_exit1 ("Cannot have ratio =0 ");
  else if (ratio > 128)
    mjpeg_error_exit1 ("Cannot have ratio >128  ");


  if ((y4m_si_get_interlace (&istreaminfo) != Y4M_ILACE_NONE)
      && (!force_interlacing))
    {
      mjpeg_error_exit1 ("Sorry, can only convert progressive streams");
    }

  /* write the outstream header */
  y4m_write_stream_header (fd_out, &ostreaminfo);

  /* now allocate the needed buffers */
  {
    /* calculate the memory offset needed to allow the processing
     * functions to overshot. The biggest overshot is needed for the
     * MC-functions, so we'll use 8*width...
     */
    buff_offset = width * 8;
    buff_size = buff_offset * 2 + width * height;

    inframe[0] = buff_offset + (uint8_t *) malloc (buff_size);
    inframe[1] = buff_offset + (uint8_t *) malloc (buff_size);
    inframe[2] = buff_offset + (uint8_t *) malloc (buff_size);

    reconstructed[0] = buff_offset + (uint8_t *) malloc (buff_size);
    reconstructed[1] = buff_offset + (uint8_t *) malloc (buff_size);
    reconstructed[2] = buff_offset + (uint8_t *) malloc (buff_size);

    frame1[0] = buff_offset + (uint8_t *) malloc (buff_size);
    frame1[1] = buff_offset + (uint8_t *) malloc (buff_size);
    frame1[2] = buff_offset + (uint8_t *) malloc (buff_size);

    mjpeg_log (LOG_INFO, "Buffers allocated.");
  }

  /* initialize motion-search-pattern */
  init_search_pattern ();

  errno = y4m_read_frame (fd_in, &istreaminfo, &iframeinfo, frame1);
  if (errno != Y4M_OK)
    goto The_end;

  /* read every frame until the end of the input stream and process it */
  while (Y4M_OK == (errno = y4m_read_frame (fd_in,
					    &istreaminfo,
					    &iframeinfo, inframe)))
    {
/* frame1 contains the previous input frame
 * inframe contains the current input frame
 * reconstructed contains the current output frame
 * percent_frame is the amount of time after which the output frame is sent 
 * 	in percent of the time between input frames
 *
 * Input:
 * frame1 . . . . . . . . . . . . . . . . . . inframe
 * Output: 
 * . . . . . . . . . . .reconstructed. . . . . . . 
 * |<- - percent_frame - - - ->|
 * |< - - - - - - - - - -100% - - - - - - - - - >|
 *
 * The variable ratio_percent_frame is the fractional representation of
 * percent_frame; it is there to avoid rounding errors 
 */
      input_frame_number++;

      if (verbose)
	{
	  mjpeg_log (LOG_INFO, "Input frame number %d", input_frame_number);
	}

      while (percent_frame < (1.0 - percent_threshold))
	{
	  output_frame_number++;
	  if (verbose)
	    {
	      mjpeg_log (LOG_INFO, "Output frame number %d",
			 output_frame_number);
	    }

#define ABS(value) ((value)<0)?-(value):(value)

	  if (ABS (percent_frame) <= percent_threshold)
	    {
	      /* I put a threshold here to avoid wasting time */
	      /* The output frame coincides with the input frame
	       * so there is no need to do any processing 
	       * just copy the input frame as is */
	      y4m_write_frame (fd_out, &ostreaminfo, &oframeinfo, frame1);
	      if (verbose)
		mjpeg_log (LOG_INFO, "Percent %f rounded to next frame",
			   percent_frame);
	    }
	  else
	    {
	      /* We have to interpolate the frame (between the current inframe
	       * and the previous frame1 
	       * if there is a scene change, motion_compensate_field will
	       * return 1 and we use the previous frame */

	      if (verbose)
		mjpeg_log (LOG_INFO, "Percent %f", percent_frame);

	      scene_change = motion_compensate_field ();
	      if (scene_change)
		{
		  mjpeg_log (LOG_INFO, "Scene change at frame %d",
			     input_frame_number);
		  y4m_write_frame (fd_out, &ostreaminfo, &oframeinfo, frame1);
		}
	      else
		{
		  y4m_write_frame (fd_out, &ostreaminfo, &oframeinfo,
				   reconstructed);
		}
	    }
	  ratio_percent_frame =
	    add_ratio (ratio_percent_frame, frame_rate_ratio);
	  percent_frame = Y4M_RATIO_DBL (ratio_percent_frame);

	}

      /* Skip input frames if downsampling  (ratio > 1)
       * when upsampling, ratio < 1
       *    so we have ( 1< percent_frame < 2) at this point 
       *    hence we don't go in in the loop */
      while (percent_frame >= 2)
	{
	  percent_frame = percent_frame - 1;
	  ratio_percent_frame = ratio_minus_1 (ratio_percent_frame);
	  if (Y4M_OK !=
	      (errno =
	       y4m_read_frame (fd_in, &istreaminfo, &iframeinfo, inframe)))
	    goto The_end;
	}
      ratio_percent_frame = ratio_minus_1 (ratio_percent_frame);
      percent_frame = percent_frame - 1;

      /* store the previous frame */
      memcpy (frame1[0], inframe[0], width * height);
      memcpy (frame1[1], inframe[1], width * height / 4);
      memcpy (frame1[2], inframe[2], width * height / 4);

    }

The_end:

  /* free allocated buffers */
  {
    free (inframe[0] - buff_offset);
    free (inframe[1] - buff_offset);
    free (inframe[2] - buff_offset);

    free (reconstructed[0] - buff_offset);
    free (reconstructed[1] - buff_offset);
    free (reconstructed[2] - buff_offset);

    free (frame1[0] - buff_offset);
    free (frame1[1] - buff_offset);
    free (frame1[2] - buff_offset);


    mjpeg_log (LOG_INFO, "Buffers freed.");
  }

  /* did stream end unexpectedly ? */
  if (errno != Y4M_ERR_EOF)
    mjpeg_error_exit1 ("%s", y4m_strerr (errno));

  /* Exit gently */
  return (0);
}
コード例 #25
0
ファイル: interact.cpp プロジェクト: zyh329/mjpegtools-1
/*
 * Identify the elementary-stream type of every candidate input and
 * append a matching JobStream to the job's stream list.
 *
 * Each input is probed in a fixed order (LPCM, MPEG audio, AC3, DTS,
 * MPEG video, subpictures, and optionally Z/Alpha); the stream state is
 * rewound between probes so every probe sees the stream from the start.
 * If any file matches no probe the run aborts after all inputs have
 * been examined.  Finally, default parameter sets are filled in for any
 * recognised stream without explicit command-line parameters, and
 * profile-mandated settings are forced where the mux format requires it.
 */
void MultiplexJob::SetupInputStreams( std::vector< IBitStream *> &inputs )
{
    IBitStream *input;
    IBitStreamUndo checkpoint;
    unsigned int idx;
    bool unrecognised = false;

    for( idx = 0; idx < inputs.size(); ++idx )
    {
        input = inputs[idx];
        // Snapshot the stream's initial state before the first probe.
        input->PrepareUndo( checkpoint );

        if( LPCMStream::Probe( *input ) )
        {
            mjpeg_info ("File %s looks like an LPCM Audio stream.",
                        input->StreamName());
            input->UndoChanges( checkpoint );
            streams.push_back( new JobStream( input,  LPCM_AUDIO) );
            ++audio_tracks;
            ++lpcm_tracks;
            continue;
        }

        input->UndoChanges( checkpoint );
        if( MPAStream::Probe( *input ) )
        {
            mjpeg_info ("File %s looks like an MPEG Audio stream.",
                        input->StreamName() );
            input->UndoChanges( checkpoint );
            streams.push_back( new JobStream( input, MPEG_AUDIO) );
            ++audio_tracks;
            continue;
        }

        input->UndoChanges( checkpoint );
        if( AC3Stream::Probe( *input ) )
        {
            mjpeg_info ("File %s looks like an AC3 Audio stream.",
                        input->StreamName());
            input->UndoChanges( checkpoint );
            streams.push_back( new JobStream( input, AC3_AUDIO) );
            ++audio_tracks;
            continue;
        }

        input->UndoChanges( checkpoint );
        if( DTSStream::Probe( *input ) )
        {
            mjpeg_info ("File %s looks like a dts Audio stream.",
                        input->StreamName());
            input->UndoChanges( checkpoint );
            streams.push_back( new JobStream( input, DTS_AUDIO) );
            ++audio_tracks;
            continue;
        }

        input->UndoChanges( checkpoint );
        if( VideoStream::Probe( *input ) )
        {
            mjpeg_info ("File %s looks like an MPEG Video stream.",
                        input->StreamName());
            input->UndoChanges( checkpoint );
            streams.push_back( new JobStream( input, MPEG_VIDEO) );
            ++video_tracks;
            continue;
        }

        input->UndoChanges( checkpoint );

        if( SUBPStream::Probe( *input ) )
        {
            mjpeg_info ("File %s looks like an Subpicture stream.",
                        input->StreamName());
            input->UndoChanges( checkpoint );
            streams.push_back( new JobStream( input, SUBP_STREAM) );
            ++subtitle_tracks;
            continue;
        }


#ifdef ZALPHA
        if( ZAlphaStream::Probe( *input ) )
        {
            mjpeg_info ("File %s looks like an Z/Alpha Video stream.",
                        input->StreamName());
            input->UndoChanges( checkpoint );
            streams.push_back( new JobStream( input, Z_ALPHA) );
            ++video_tracks;
            ++z_alpha_tracks;
            continue;
        }
#endif
        // Nothing matched: remember the failure but keep probing the
        // remaining inputs so all bad files are reported in one run.
        unrecognised = true;
        mjpeg_error ("File %s unrecogniseable!", input->StreamName());
        delete input;
    }

    if( unrecognised )
    {
        mjpeg_error_exit1( "Unrecogniseable file(s)... exiting.");
    }

    //
    // Where no parameters for streams have been specified
    // simply set the default values (these will depend on the format
    // we're muxing of course...)
    //
    for( idx = video_param.size(); idx < video_tracks; ++idx )
        video_param.push_back(VideoParams::Default( mux_format ));

    for( idx = lpcm_param.size(); idx < lpcm_tracks; ++idx )
        lpcm_param.push_back(LpcmParams::Default(mux_format));

    for( idx = subtitle_params.size(); idx < subtitle_tracks; ++idx )
        subtitle_params.push_back(SubtitleStreamParams::Default(mux_format));

    //
    // Set standard values if the selected profile implies this...
    //
    for( idx = 0; idx < video_tracks; ++idx )
    {
        if( video_param[idx]->Force(mux_format) )
        {
            mjpeg_info( "Video stream %d: profile %d selected - ignoring non-standard options!", idx, mux_format );
        }
    }

    mjpeg_info( "Found %d audio streams, %d video streams and %d subtitle streams",
                audio_tracks,
                video_tracks,
                subtitle_tracks
              );

}
コード例 #26
0
ファイル: mpastrm_in.cpp プロジェクト: BlackMael/DirectEncode
/*
 * Initialise an MPEG audio elementary stream for multiplexing.
 *
 * Registers the stream with the multiplexer (stream id AUDIO_STR_0 +
 * stream_num), then parses the first MPEG audio frame header to derive
 * the frame size, sample rate and the first access unit's timestamps.
 * Exits fatally if the stream does not start with a valid audio
 * syncword.
 */
void MPAStream::Init ( const int stream_num )

{
	int padding_bit;

	MuxStream::Init( AUDIO_STR_0 + stream_num, 
					 0,  // Buffer scale
					 muxinto.audio_buffer_size,
					 muxinto.vcd_zero_stuffing,
					 muxinto.buffers_in_audio,
					 muxinto.always_buffers_in_audio
		);
    mjpeg_info ("Scanning for header info: Audio stream %02x (%s)",
                AUDIO_STR_0 + stream_num,
                bs.StreamName()
                );

	/* A.Stevens 2000 - update to be compatible up to  MPEG2.5
	 */
    AU_start = bs.bitcount();
    if (bs.GetBits(11)==AUDIO_SYNCWORD)
    {
		num_syncword++;
		/* Header fields, in the exact bit order of the MPEG audio
		   frame header following the 11-bit syncword. */
		version_id = bs.GetBits( 2);
		layer 		= 3-bs.GetBits( 2); /* 0..2 not 1..3!! */
		protection 		= bs.Get1Bit();
		bit_rate_code	= bs.GetBits( 4);
		frequency 		= bs.GetBits( 2);
		padding_bit     = bs.Get1Bit();
		bs.Get1Bit();
		mode 		= bs.GetBits( 2);
		mode_extension 	= bs.GetBits( 2);
		copyright 		= bs.Get1Bit();
		original_copy 	= bs.Get1Bit ();
		emphasis		= bs.GetBits( 2);

		/* Nominal frame size in bytes from the bitrate/sample-rate
		   tables; a padded frame is one byte (one slot) longer. */
		framesize =
			mpa_bitrates_kbps[version_id][layer][bit_rate_code]  * 
			mpa_slots[layer] *1000 /
			mpa_freq_table[version_id][frequency];

		/* Layer I (layer==0) slots are 4 bytes, other layers 1 byte. */
		size_frames[0] = framesize * ( layer == 0 ? 4 : 1);
		size_frames[1] = (framesize+1) * ( layer == 0 ? 4 : 1);
		num_frames[padding_bit]++;
        access_unit.start  = AU_start;
		access_unit.length = size_frames[padding_bit];
	  
		samples_per_second = mpa_freq_table[version_id][frequency];

		/* Presentation time-stamping  */
		access_unit.PTS = static_cast<clockticks>(decoding_order) * 
			static_cast<clockticks>(mpa_samples [layer]) * 
			static_cast<clockticks>(CLOCKS)	/ samples_per_second;
		/* Audio has no reordering: decode time equals presentation time. */
		access_unit.DTS = access_unit.PTS;
		access_unit.dorder = decoding_order;
		++decoding_order;
		aunits.Append( access_unit );

    } else
    {
		mjpeg_error_exit1 ( "Invalid MPEG Audio stream header.");
		// exit (1);
    }


	OutputHdrInfo();
}
コード例 #27
0
ファイル: ppmtoy4m.c プロジェクト: AquaSoftGmbH/mjpeg
/*
 * Read one PPM image (or field pair) from 'fd' into the Y4M buffers.
 *
 * On the first call the frame geometry is recorded in *ppm and the
 * scratch row buffer and plane buffers are allocated; subsequent frames
 * must match the recorded geometry exactly.  For interleaved interlaced
 * input the two fields are deinterleaved while reading; for
 * non-interleaved interlaced input a second PPM image is read into
 * 'buffers2'.
 *
 * Returns 0 on success, 1 on clean EOF, -1 on a read error; exits
 * fatally on malformed or mismatched input.
 */
static
int read_ppm_frame(int fd, ppm_info_t *ppm,
		   uint8_t *buffers[], uint8_t *buffers2[],
		   int ilace, int ileave, int bgr)
{
  int width, height;
  static uint8_t *rowbuffer = NULL;  /* reused across calls */
  int err;

  err = read_ppm_header(fd, &width, &height);
  if (err > 0) return 1;  /* EOF */
  if (err < 0) return -1; /* error */
  mjpeg_debug("Got PPM header:  %dx%d", width, height);

  if (ppm->width == 0) {
    /* first time */
    mjpeg_debug("Initializing PPM read_frame");
    ppm->width = width;
    ppm->height = height;
    rowbuffer = malloc(width * 3 * sizeof(rowbuffer[0]));
    /* FIX: the allocation was previously unchecked; a failure here
       would crash later on a NULL dereference. */
    if (rowbuffer == NULL)
      mjpeg_error_exit1("Failed to allocate PPM row buffer!");
  } else {
    /* make sure everything matches */
    if ( (ppm->width != width) ||
	 (ppm->height != height) )
      mjpeg_error_exit1("One of these frames is not like the others!");
  }
  if (buffers[0] == NULL) 
    alloc_buffers(buffers, width, height);
  if ((buffers2[0] == NULL) && (ilace != Y4M_ILACE_NONE))
    alloc_buffers(buffers2, width, height);

  mjpeg_debug("Reading rows");

  if ((ilace != Y4M_ILACE_NONE) && (ileave)) {
    /* Interlaced and Interleaved:
       --> read image and deinterleave fields at same time */
    if (ilace == Y4M_ILACE_TOP_FIRST) {
      /* 1st buff arg == top field == temporally first == "buffers" */
      read_ppm_into_two_buffers(fd, buffers, buffers2,
				rowbuffer, width, height, bgr);
    } else {
      /* bottom-field-first */
      /* 1st buff art == top field == temporally second == "buffers2" */
      read_ppm_into_two_buffers(fd, buffers2, buffers,
				rowbuffer, width, height, bgr);
    }      
  } else if ((ilace == Y4M_ILACE_NONE) || (!ileave)) {
    /* Not Interlaced, or Not Interleaved:
       --> read image into first buffer... */
    read_ppm_into_one_buffer(fd, buffers, rowbuffer, width, height, bgr);
    if ((ilace != Y4M_ILACE_NONE) && (!ileave)) {
      /* ...Actually Interlaced:
	 --> read the second image/field into second buffer */
      err = read_ppm_header(fd, &width, &height);
      if (err > 0) return 1;  /* EOF */
      if (err < 0) return -1; /* error */
      mjpeg_debug("Got PPM header:  %dx%d", width, height);
      
      /* make sure everything matches */
      if ( (ppm->width != width) ||
	   (ppm->height != height) )
	mjpeg_error_exit1("One of these frames is not like the others!");
      read_ppm_into_one_buffer(fd, buffers2, rowbuffer, width, height, bgr);
    }
  }
  return 0;
}
コード例 #28
0
ファイル: y4munsharp.c プロジェクト: AquaSoftGmbH/mjpeg
int
main(int argc, char **argv)
	{
	int	fdin, fdout, err, c, i, verbose = 1;
	y4m_stream_info_t istream, ostream;
	y4m_frame_info_t iframe;

	fdin = fileno(stdin);
	fdout = fileno(stdout);

	y4m_accept_extensions(1);
	y4m_init_stream_info(&istream);
	y4m_init_frame_info(&iframe);

	while	((c = getopt(argc, argv, "L:C:hv:N")) != EOF)
		{
		switch	(c)
			{
			case	'N':
				lowuv = lowy = 0;
				lowuv = highy = 255;
				break;
			case	'L':
				i = sscanf(optarg, "%lf,%lf,%d", &y_radius, 
						&y_amount, &y_threshold);
				if	(i != 3)
					{
					mjpeg_error("-L r,a,t");
					usage(argv[0]);
					}
				break;
			case	'C':
				i = sscanf(optarg, "%lf,%lf,%d", &uv_radius,
						&uv_amount, &uv_threshold);
				if	(i != 3)
					{
					mjpeg_error("-C r,a,t");
					usage(argv[0]);
					}
				break;
			case	'v':
				verbose = atoi(optarg);
				if	(verbose < 0 || verbose > 2)
					mjpeg_error_exit1("-v 0|1|2");
				break;
			case	'h':
			default:
				usage(argv[0]);
				break;
			}
		}

	if	(isatty(fdout))
		mjpeg_error_exit1("stdout must not be a terminal");

	mjpeg_default_handler_verbosity(verbose);

	err = y4m_read_stream_header(fdin, &istream);
	if	(err != Y4M_OK)
		mjpeg_error_exit1("Couldn't read input stream header");

	switch	(y4m_si_get_interlace(&istream))
		{
		case	Y4M_ILACE_NONE:
			interlaced = 0;
			break;
		case	Y4M_ILACE_BOTTOM_FIRST:
		case	Y4M_ILACE_TOP_FIRST:
			interlaced = 1;
			break;
		default:
			mjpeg_error_exit1("Unsupported/unknown interlacing");
		}

	if	(y4m_si_get_plane_count(&istream) != 3)
		mjpeg_error_exit1("Only 3 plane formats supported");

	yheight = y4m_si_get_plane_height(&istream, 0);
	uvheight = y4m_si_get_plane_height(&istream, 1);
	ywidth = y4m_si_get_plane_width(&istream, 0);
	uvwidth = y4m_si_get_plane_width(&istream, 1);
	ylen = y4m_si_get_plane_length(&istream, 0);
	uvlen = y4m_si_get_plane_length(&istream, 1);

/* Input and output frame buffers */
	i_yuv[0] = (u_char *)malloc(ylen);
	i_yuv[1] = (u_char *)malloc(uvlen);
	i_yuv[2] = (u_char *)malloc(uvlen);
	o_yuv[0] = (u_char *)malloc(ylen);
	o_yuv[1] = (u_char *)malloc(uvlen);
	o_yuv[2] = (u_char *)malloc(uvlen);

/*
 * general purpose row/column scratch buffers.  Slightly over allocated to
 * simplify life.
*/
	cur_col = (u_char *)malloc(MAX(ywidth, yheight));
	dest_col = (u_char *)malloc(MAX(ywidth, yheight));
	cur_row = (u_char *)malloc(MAX(ywidth, yheight));
	dest_row = (u_char *)malloc(MAX(ywidth, yheight));

/*
 * Generate the convolution matrices.  The generation routine allocates the
 * memory and returns the length.
*/
	cmatrix_y_len = gen_convolve_matrix(y_radius, &cmatrix_y);
	cmatrix_uv_len = gen_convolve_matrix(uv_radius, &cmatrix_uv);
	ctable_y = gen_lookup_table(cmatrix_y, cmatrix_y_len);
	ctable_uv = gen_lookup_table(cmatrix_uv, cmatrix_uv_len);

	y4m_init_stream_info(&ostream);
	y4m_copy_stream_info(&ostream, &istream);
	y4m_write_stream_header(fileno(stdout), &ostream);

	mjpeg_info("Luma radius: %f", y_radius);
	mjpeg_info("Luma amount: %f", y_amount);
	mjpeg_info("Luma threshold: %d", y_threshold);
	if	(uv_radius != -1.0)
		{
		mjpeg_info("Chroma radius: %f", uv_radius);
		mjpeg_info("Chroma amount: %f", uv_amount);
		mjpeg_info("Chroma threshold: %d", uv_threshold);
		}

	for	(frameno = 0; y4m_read_frame(fdin, &istream, &iframe, i_yuv) == Y4M_OK; frameno++)
		{
		y4munsharp();
		err = y4m_write_frame(fdout, &ostream, &iframe, o_yuv);
		if	(err != Y4M_OK)
			{
			mjpeg_error("y4m_write_frame err at frame %d", frameno);
			break;
			}
		}
	y4m_fini_frame_info(&iframe);
	y4m_fini_stream_info(&istream);
	y4m_fini_stream_info(&ostream);
	exit(0);
	}
コード例 #29
0
ファイル: ppmtoy4m.c プロジェクト: AquaSoftGmbH/mjpeg
/*
 * ppmtoy4m entry point: reads raw PPM images (frames or field pairs)
 * from the input, converts R'G'B' to Y'CbCr, subsamples the chroma and
 * emits a YUV4MPEG2 stream on stdout.  With '-o' leading frames are
 * skipped; with 'repeatlast' the final frame is repeated to reach the
 * requested frame count.
 */
int main(int argc, char **argv)
{
  cl_info_t cl;
  y4m_stream_info_t sinfo;
  y4m_frame_info_t finfo;
  uint8_t *planes[Y4M_MAX_NUM_PLANES];   /* R'G'B' or Y'CbCr */
  uint8_t *planes2[Y4M_MAX_NUM_PLANES];  /* second field, if interlaced */
  ppm_info_t ppm;
  int field_height;

  int fdout = 1;
  int err, i, count, repeating_last;

  y4m_accept_extensions(1);
  y4m_init_stream_info(&sinfo);
  y4m_init_frame_info(&finfo);

  parse_args(&cl, argc, argv);

  ppm.width = 0;
  ppm.height = 0;
  for (i = 0; i < 3; i++) {
    planes[i] = NULL;
    planes2[i] = NULL;
  }

  /* The first image is read ahead of the loop so the output stream
     header can be generated from its dimensions. */
  if (read_ppm_frame(cl.fdin, &ppm, planes, planes2,
		     cl.interlace, cl.interleave, cl.bgr))
    mjpeg_error_exit1("Failed to read first frame.");

  /* Setup streaminfo and write output header */
  setup_output_stream(fdout, &cl, &sinfo, &ppm, &field_height);

  /* Loop 'framecount' times, or possibly forever... */
  repeating_last = 0;
  for (count = 0;
       (count < (cl.offset + cl.framecount)) || (cl.framecount == 0);
       count++) {

    if (!repeating_last) {
      /* Read the next PPM frame/field -- except the very first one,
	 which was already read before the loop. */
      if (count > 0) {
	err = read_ppm_frame(cl.fdin, &ppm, planes, planes2,
			     cl.interlace, cl.interleave, cl.bgr);
	if (err == 1) {
	  /* clean input EOF */
	  if (cl.repeatlast) {
	    repeating_last = 1;   /* from now on, re-emit the last frame */
	  } else if (cl.framecount != 0) {
	    mjpeg_error_exit1("Input frame shortfall (only %d converted).",
			      count - cl.offset);
	  } else {
	    break;  /* input is exhausted; we are done!  time to go home! */
	  }
	} else if (err) {
	  mjpeg_error_exit1("Error reading ppm frame");
	}
      }

      /* Skip the colorspace work for frames that will be discarded by
	 the offset anyway -- unless 'repeatlast' is on, in which case
	 every frame must be processed because the last one cannot be
	 identified in advance. */
      if (!repeating_last && ((count >= cl.offset) || (cl.repeatlast))) {
	/* Transform colorspace, then subsample (in place) */
	convert_RGB_to_YCbCr(planes, ppm.width * field_height);
	chroma_subsample(cl.ss_mode, planes, ppm.width, field_height);
	if (cl.interlace != Y4M_ILACE_NONE) {
	  convert_RGB_to_YCbCr(planes2, ppm.width * field_height);
	  chroma_subsample(cl.ss_mode, planes2, ppm.width, field_height);
	}
      }
    }

    /* Write converted (possibly repeated) frame once past the offset. */
    if (count >= cl.offset) {
      switch (cl.interlace) {
      case Y4M_ILACE_NONE:
	err = y4m_write_frame(fdout, &sinfo, &finfo, planes);
	if (err != Y4M_OK)
	  mjpeg_error_exit1("Write frame failed: %s", y4m_strerr(err));
	break;
      case Y4M_ILACE_TOP_FIRST:
	err = y4m_write_fields(fdout, &sinfo, &finfo, planes, planes2);
	if (err != Y4M_OK)
	  mjpeg_error_exit1("Write fields failed: %s", y4m_strerr(err));
	break;
      case Y4M_ILACE_BOTTOM_FIRST:
	err = y4m_write_fields(fdout, &sinfo, &finfo, planes2, planes);
	if (err != Y4M_OK)
	  mjpeg_error_exit1("Write fields failed: %s", y4m_strerr(err));
	break;
      default:
	mjpeg_error_exit1("Unknown ilace type!   %d", cl.interlace);
	break;
      }
    }
  } 


  for (i = 0; i < 3; i++) {
    free(planes[i]);
    free(planes2[i]);
  }
  y4m_fini_stream_info(&sinfo);
  y4m_fini_frame_info(&finfo);

  mjpeg_debug("Done.");
  return 0;
}
コード例 #30
0
/// Prefills the internal buffer for output multiplexing.
/// Scans ahead in the DTS elementary stream, parsing one frame header per
/// iteration and appending an access unit (with computed PTS/DTS) for each
/// frame, until the requested read-ahead count is reached, the bit-stream
/// ends, or the muxer's maximum PTS is exceeded.
/// @param frames_to_buffer the number of audio frames to read ahead
void DTSStream::FillAUbuffer(unsigned int frames_to_buffer )
{
    unsigned int packet_samples;

	last_buffered_AU += frames_to_buffer;
	mjpeg_debug( "Scanning %d dts audio frames to frame %d", 
				 frames_to_buffer, last_buffered_AU );

	while( !bs.eos() && decoding_order < last_buffered_AU 
            && !muxinto.AfterMaxPTS(access_unit.PTS) )
	{
		/* Skip over the payload of the current frame (its header bits were
		   already consumed), landing at the expected start of the next. */
		int skip = access_unit.length - header_skip; 
        bs.SeekFwdBits(skip);
		prev_offset = AU_start;
		AU_start = bs.bitcount();

        /* If the seek advanced fewer bits than the frame claimed, the last
           frame is truncated: drop the access unit already queued for it
           and stop scanning. */
        if( AU_start - prev_offset != access_unit.length*8 )
        {
            mjpeg_warn( "Discarding incomplete final frame dts stream %d!",
                       stream_num);
            aunits.DropLast();
            decoding_order--;
            break;
        }

		/* Check if we have reached the end or have  another catenated 
		   stream to process before finishing ... */
		if ( (syncword = bs.GetBits(32))!=DTS_SYNCWORD )
		{
			if( !bs.eos()   )
			{
				mjpeg_error_exit1( "Can't find next dts frame: @ %lld we have %04x - broken bit-stream?", AU_start/8, syncword );
            }
            break;
		}

        /* Fixed-layout header fields after the syncword; the bit widths
           here presumably follow the DTS frame header layout — confirm
           against the DTS coherent acoustics specification. */
        bs.GetBits(6);         // additional sync
        bs.GetBits(1);         // CRC
        packet_samples = (bs.GetBits(7) + 1) * 32;         // pcm samples
        framesize = bs.GetBits(14) + 1;        // frame size

        bs.GetBits(6);              // audio channels
        bs.GetBits(4);              // sample rate code
        bs.GetBits(5);              // bitrate
        bs.GetBits(5);              // misc.

        /* Record the new access unit.  PTS = frame index * samples per
           frame * clock ticks per second / sample rate; DTS equals PTS
           (no reordering for audio). */
        access_unit.start = AU_start;
		access_unit.length = framesize;
		access_unit.PTS = static_cast<clockticks>(decoding_order) * 
			static_cast<clockticks>(packet_samples) * 
			static_cast<clockticks>(CLOCKS)	/ samples_per_second;
		access_unit.DTS = access_unit.PTS;
		access_unit.dorder = decoding_order;
		decoding_order++;
		aunits.Append( access_unit );
		num_frames++;

		num_syncword++;

		/* Progress logging every 10 frame headers seen. */
		if (num_syncword >= old_frames+10 )
		{
			mjpeg_debug ("Got %d frame headers.", num_syncword);
			old_frames=num_syncword;
		}

    }
	last_buffered_AU = decoding_order;
	/* Record whether the scan stopped because input was exhausted (or the
	   mux PTS ceiling was hit) rather than because the frame budget ran out. */
	eoscan = bs.eos() || muxinto.AfterMaxPTS(access_unit.PTS);
}