Example #1
void OpcodeDeltaPerCol::apply( RawImage &in, RawImage &out, int startY, int endY )
{
  if (in->getDataType() == TYPE_USHORT16) {
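    // 16-bit data: add the per-column integer delta and clamp the result to 16 bits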
    int cpp = out->getCpp();
    for (int y = startY; y < endY; y += mRowPitch) {
      ushort16 *src = (ushort16*)out->getData(mAoi.getLeft(), y);
      // Add offset, so this is always first plane
      src+=mFirstPlane;
      for (int x = 0; x < mAoi.getWidth(); x += mColPitch) {
        for (int p = 0; p < mPlanes; p++)
        {
          src[x*cpp+p] = clampbits(16, mDeltaX[x] + src[x*cpp+p]);
        }
      }
    }
  } else {
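    // Float data: add the per-column float delta directly (no clamping)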
    int cpp = out->getCpp();
    for (int y = startY; y < endY; y += mRowPitch) {
      float *src = (float*)out->getData(mAoi.getLeft(), y);
      // Add offset, so this is always first plane
      src+=mFirstPlane;
      for (int x = 0; x < mAoi.getWidth(); x += mColPitch) {
        for (int p = 0; p < mPlanes; p++)
        {
          src[x*cpp+p] = mDelta[x] + src[x*cpp+p];
        }
      }
    }
  }
}
Example #2
bool CaptureFromFile::copyAndConvertFrame(const RawImage & src, RawImage & target)
{
#ifndef VDATA_NO_QT
  mutex.lock();
#endif
  ColorFormat output_fmt = Colors::stringToColorFormat(v_colorout->getSelection().c_str());
  ColorFormat src_fmt=src.getColorFormat();
    
  if (target.getData()==0)
    target.allocate(output_fmt, src.getWidth(), src.getHeight());
  else
    target.ensure_allocation(output_fmt, src.getWidth(), src.getHeight());
     
  target.setTime(src.getTime());
     
  if (output_fmt == src_fmt)
  {
    if (src.getData() != 0)
      memcpy(target.getData(),src.getData(),src.getNumBytes());
  }
  else if (src_fmt == COLOR_RGB8 && output_fmt == COLOR_YUV422_UYVY)
  {
    if (src.getData() != 0)
      dc1394_convert_to_YUV422(src.getData(), target.getData(), src.getWidth(), src.getHeight(), 
                               DC1394_BYTE_ORDER_UYVY, DC1394_COLOR_CODING_RGB8, 8);
  }
  else if (src_fmt == COLOR_YUV422_UYVY && output_fmt == COLOR_RGB8)
  {
    if (src.getData() != 0)
      dc1394_convert_to_RGB8(src.getData(),target.getData(), src.getWidth(), src.getHeight(), 
                             DC1394_BYTE_ORDER_UYVY, DC1394_COLOR_CODING_YUV422, 8);
  } 
  else 
  {
    fprintf(stderr,"Cannot copy and convert frame...unknown conversion selected from: %s to %s\n",
            Colors::colorFormatToString(src_fmt).c_str(),
            Colors::colorFormatToString(output_fmt).c_str());
#ifndef VDATA_NO_QT
    mutex.unlock();
#endif
    return false;
  } 
#ifndef VDATA_NO_QT
  mutex.unlock();
#endif
  return true;
}
Example #3
void OpcodeMapPolynomial::apply( RawImage &in, RawImage &out, int startY, int endY )
{
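  // Replace each sample in the area of interest with its value from the polynomial lookup table (mLookup)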
  int cpp = out->getCpp();
  for (int y = startY; y < endY; y += mRowPitch) {
    ushort16 *src = (ushort16*)out->getData(mAoi.getLeft(), y);
    // Add offset, so this is always first plane
    src+=mFirstPlane;
    for (int x = 0; x < mAoi.getWidth(); x += mColPitch) {
      for (int p = 0; p < mPlanes; p++)
      {
        src[x*cpp+p] = mLookup[src[x*cpp+p]];
      }
    }
  }
}
Example #4
void OpcodeScalePerRow::apply( RawImage &in, RawImage &out, int startY, int endY )
{
  if (in->getDataType() == TYPE_USHORT16) {
    int cpp = out->getCpp();
    for (int y = startY; y < endY; y += mRowPitch) {
      ushort16 *src = (ushort16*)out->getData(mAoi.getLeft(), y);
      // Add offset, so this is always first plane
      src+=mFirstPlane;
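      // Convert the per-row float scale to 10-bit fixed point; the +512 below rounds before the >>10 shift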
      int delta = (int)(1024.0f * mDelta[y]);
      for (int x = 0; x < mAoi.getWidth(); x += mColPitch) {
        for (int p = 0; p < mPlanes; p++)
        {
          src[x*cpp+p] = clampbits(16,(delta * src[x*cpp+p] + 512) >> 10);
        }
      }
    }
  } else {
    // Float data: scale directly by the per-row delta (branch reconstructed to parallel the 16-bit path above)
    int cpp = out->getCpp();
    for (int y = startY; y < endY; y += mRowPitch) {
      float *src = (float*)out->getData(mAoi.getLeft(), y);
      // Add offset, so this is always first plane
      src+=mFirstPlane;
      for (int x = 0; x < mAoi.getWidth(); x += mColPitch) {
        for (int p = 0; p < mPlanes; p++)
        {
          src[x*cpp+p] = mDelta[y] * src[x*cpp+p];
        }
      }
    }
  }
}
Example #5
void OpcodeFixBadPixelsConstant::apply( RawImage &in, RawImage &out, int startY, int endY )
{
  iPoint2D crop = in->getCropOffset();
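  // Bad pixel positions are packed as x in the low 16 bits and y in the high 16 bits, offset by the crop origin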
  uint32 offset = crop.x | (crop.y << 16);
  vector<uint32> bad_pos;
  for (int y = startY; y < endY; y ++) {
    ushort16* src = (ushort16*)out->getData(0, y);
    for (int x = 0; x < in->dim.x; x++) {
      if (src[x]== mValue) {
        bad_pos.push_back(offset + ((uint32)x | (uint32)y<<16));
      }
    }
  }
  if (!bad_pos.empty()) {
    out->mBadPixelMutex.lock();
    out->mBadPixelPositions.insert(out->mBadPixelPositions.end(), bad_pos.begin(), bad_pos.end());
    out->mBadPixelMutex.unlock();
  }
}
Example #6
void GLLUTWidget::sampleImage(const RawImage & img) {
  //compute slice it sits on:
  ColorFormat source_format=img.getColorFormat();
  
  int n=img.getNumPixels();
  
  yuv color;
  int i=0;
  
  if (img.getWidth() > 1 && img.getHeight() > 1) {
    if (source_format==COLOR_RGB8) {
      rgbImage rgb_img(img);
      rgb * color_rgb=rgb_img.getPixelData();
      for (int j=0;j<n;j++) {
        color=Conversions::rgb2yuv(*color_rgb);
        i=_lut->norm2lutX(color.y);
        if (i >= 0 && i < (int)slices.size()) {
          drawSample(i,_lut->norm2lutY(color.u),_lut->norm2lutZ(color.v));
          //slices[i]->sampler->surface.setPixel(_lut->norm2lutY(color.u),_lut->norm2lutZ(color.v),rgba(255,255,255,255));
          slices[i]->sampler_update_pending=true;
        }
        color_rgb++;
      }
    } else if (source_format==COLOR_YUV444) {    
      yuvImage yuv_img(img);
      yuv * color_yuv=yuv_img.getPixelData();
      for (int j=0;j<n;j++) {
        color=(*color_yuv);
        i=_lut->norm2lutX(color.y);
        if (i >= 0 && i < (int)slices.size()) {
          //slices[i]->sampler->surface.setPixel(_lut->norm2lutY(color.u),_lut->norm2lutZ(color.v),rgba(255,255,255,255));
          drawSample(i,_lut->norm2lutY(color.u),_lut->norm2lutZ(color.v));
          slices[i]->sampler_update_pending=true;
        }
        color_yuv++;
      }
    } else if (source_format==COLOR_YUV422_UYVY) {
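        // UYVY packs two pixels per macropixel (shared U/V, separate Y1/Y2), hence the step of 2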
        uyvy * color_uyvy = (uyvy*)img.getData();
        uyvy color_uyvy_tmp;
        for (int j=0;j<n;j+=2) {
          color_uyvy_tmp=(*color_uyvy);
          color.u=color_uyvy_tmp.u;
          color.v=color_uyvy_tmp.v;
  
          color.y=color_uyvy_tmp.y1;
          i=_lut->norm2lutX(color.y);
          if (i >= 0 && i < (int)slices.size()) {
            //slices[i]->sampler->surface.setPixel(_lut->norm2lutY(color.u),_lut->norm2lutZ(color.v),rgba(255,255,255,255));
            drawSample(i,_lut->norm2lutY(color.u),_lut->norm2lutZ(color.v));
            slices[i]->sampler_update_pending=true;
          }
  
          color.y=color_uyvy_tmp.y2;
          i=_lut->norm2lutX(color.y);
          if (i >= 0 && i < (int)slices.size()) {
            //slices[i]->sampler->surface.setPixel(_lut->norm2lutY(color.u),_lut->norm2lutZ(color.v),rgba(255,255,255,255));
            drawSample(i,_lut->norm2lutY(color.u),_lut->norm2lutZ(color.v));
            slices[i]->sampler_update_pending=true;
          }
          color_uyvy++;
        }
    } else {
      fprintf(stderr,"Unable to sample colors from frame of format: %s\n",Colors::colorFormatToString(source_format).c_str());
      fprintf(stderr,"Currently supported are rgb8, yuv444, and yuv422 (UYVY).\n");
      fprintf(stderr,"(Feel free to add more conversions to glLUTwidget.cpp).\n");
    }
  }
  redraw();
}
Example #7
RawImage Cr2Decoder::decodeRawInternal() {
  if(hints.find("old_format") != hints.end()) {
    uint32 off = 0;
    if (mRootIFD->getEntryRecursive((TiffTag)0x81))
      off = mRootIFD->getEntryRecursive((TiffTag)0x81)->getInt();
    else {
      vector<TiffIFD*> data = mRootIFD->getIFDsWithTag(CFAPATTERN);
      if (data.empty())
        ThrowRDE("CR2 Decoder: Couldn't find offset");
      else {
        if (data[0]->hasEntry(STRIPOFFSETS))
          off = data[0]->getEntry(STRIPOFFSETS)->getInt();
        else
          ThrowRDE("CR2 Decoder: Couldn't find offset");
      }
    }

    ByteStream *b;
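    // The old-format header at off+41 stores 16-bit height and width as big-endian values, so swap on little-endian hosts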
    if (getHostEndianness() == big)
      b = new ByteStream(mFile, off+41);
    else
      b = new ByteStreamSwap(mFile, off+41);
    uint32 height = b->getShort();
    uint32 width = b->getShort();

    // Every two lines can be encoded as a single line, probably to try and get
    // better compression by getting the same RGBG sequence in every line
    if(hints.find("double_line_ljpeg") != hints.end()) {
      height *= 2;
      mRaw->dim = iPoint2D(width*2, height/2);
    }
    else {
      width *= 2;
      mRaw->dim = iPoint2D(width, height);
    }

    mRaw->createData();
    LJpegPlain *l = new LJpegPlain(mFile, mRaw);
    try {
      l->startDecoder(off, mFile->getSize()-off, 0, 0);
    } catch (IOException& e) {
      mRaw->setError(e.what());
    }

    delete l;

    if(hints.find("double_line_ljpeg") != hints.end()) {
      // We now have a double width half height image we need to convert to the
      // normal format
      iPoint2D final_size(width, height);
      RawImage procRaw = RawImage::create(final_size, TYPE_USHORT16, 1);
      procRaw->metadata = mRaw->metadata;
      procRaw->copyErrorsFrom(mRaw);

      for (uint32 y = 0; y < height; y++) {
        ushort16 *dst = (ushort16*)procRaw->getData(0,y);
        ushort16 *src = (ushort16*)mRaw->getData(y%2 == 0 ? 0 : width, y/2);
        for (uint32 x = 0; x < width; x++)
          dst[x] = src[x];
      }
      mRaw = procRaw;
    }

    if (mRootIFD->getEntryRecursive((TiffTag)0x123)) {
      TiffEntry *curve = mRootIFD->getEntryRecursive((TiffTag)0x123);
      if (curve->type == TIFF_SHORT && curve->count == 4096) {
        TiffEntry *linearization = mRootIFD->getEntryRecursive((TiffTag)0x123);
        uint32 len = linearization->count;
        ushort16 *table = new ushort16[len];
        linearization->getShortArray(table, len);
        if (!uncorrectedRawValues) {
          mRaw->setTable(table, 4096, true);
          // Apply table
          mRaw->sixteenBitLookup();
          // Delete table
          mRaw->setTable(NULL);
        } else {
          // We want uncorrected, but we store the table.
          mRaw->setTable(table, 4096, false);
        }
      }
    }

    return mRaw;
  }

  vector<TiffIFD*> data = mRootIFD->getIFDsWithTag((TiffTag)0xc5d8);

  if (data.empty())
    ThrowRDE("CR2 Decoder: No image data found");


  TiffIFD* raw = data[0];
  mRaw = RawImage::create();
  mRaw->isCFA = true;
  vector<Cr2Slice> slices;
  int completeH = 0;
  bool doubleHeight = false;

  try {
    TiffEntry *offsets = raw->getEntry(STRIPOFFSETS);
    TiffEntry *counts = raw->getEntry(STRIPBYTECOUNTS);
    // Iterate through all slices
    for (uint32 s = 0; s < offsets->count; s++) {
      Cr2Slice slice;
      slice.offset = offsets[0].getInt();
      slice.count = counts[0].getInt();
      SOFInfo sof;
      LJpegPlain *l = new LJpegPlain(mFile, mRaw);
      l->getSOF(&sof, slice.offset, slice.count);
      delete l;
      slice.w = sof.w * sof.cps;
      slice.h = sof.h;
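      // Heuristic: a 4-component slice much wider than it is tall indicates Canon's double-height layout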
      if (sof.cps == 4 && slice.w > slice.h * 4) {
        doubleHeight = true;
      }
      if (!slices.empty())
        if (slices[0].w != slice.w)
          ThrowRDE("CR2 Decoder: Slice width does not match.");

      if (mFile->isValid(slice.offset, slice.count)) // Only decode if size is valid
        slices.push_back(slice);
      completeH += slice.h;
    }
  } catch (TiffParserException) {
    ThrowRDE("CR2 Decoder: Unsupported format.");
  }

  // Override with canon_double_height if set.
  map<string,string>::iterator msb_hint = hints.find("canon_double_height");
  if (msb_hint != hints.end())
    doubleHeight = (0 == (msb_hint->second).compare("true"));

  if (slices.empty()) {
    ThrowRDE("CR2 Decoder: No Slices found.");
  }
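  // Full image: width of one slice, height is the sum of all slice heights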
  mRaw->dim = iPoint2D(slices[0].w, completeH);

  // Fix for Canon 6D mRaw, which has flipped width & height for some part of the image
  // In that case, we swap width and height, since this is the correct dimension
  bool flipDims = false;
  bool wrappedCr2Slices = false;
  if (raw->hasEntry((TiffTag)0xc6c5)) {
    ushort16 ss = raw->getEntry((TiffTag)0xc6c5)->getInt();
    // sRaw
    if (ss == 4) {
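      // sRaw frames carry 3 components per pixel, so the component-counted width is divided by 3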
      mRaw->dim.x /= 3;
      mRaw->setCpp(3);
      mRaw->isCFA = false;

      // Fix for Canon 80D mraw format.
      // In that format, the frame (as read by getSOF()) is 4032x3402, while the
      // real image should be 4536x3024 (where the full vertical slices in
      // the frame "wrap around" the image).
      if (hints.find("wrapped_cr2_slices") != hints.end() && raw->hasEntry(IMAGEWIDTH) && raw->hasEntry(IMAGELENGTH)) {
        wrappedCr2Slices = true;
        int w = raw->getEntry(IMAGEWIDTH)->getInt();
        int h = raw->getEntry(IMAGELENGTH)->getInt();
        if (w * h != mRaw->dim.x * mRaw->dim.y) {
          ThrowRDE("CR2 Decoder: Wrapped slices don't match image size");
        }
        mRaw->dim = iPoint2D(w, h);
      }
    }
    flipDims = mRaw->dim.x < mRaw->dim.y;
    if (flipDims) {
      int w = mRaw->dim.x;
      mRaw->dim.x = mRaw->dim.y;
      mRaw->dim.y = w;
    }
  }

  mRaw->createData();

  vector<int> s_width;
  if (raw->hasEntry(CANONCR2SLICE)) {
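    // CANONCR2SLICE holds [slice count, slice width, last slice width]; build one width entry per slice plus the final one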
    TiffEntry *ss = raw->getEntry(CANONCR2SLICE);
    for (int i = 0; i < ss->getShort(0); i++) {
      s_width.push_back(ss->getShort(1));
    }
    s_width.push_back(ss->getShort(2));
  } else {
    s_width.push_back(slices[0].w);
  }
  uint32 offY = 0;

  if (s_width.size() > 15)
    ThrowRDE("CR2 Decoder: No more than 15 slices supported");
  _RPT1(0,"Org slices:%d\n", s_width.size());
  for (uint32 i = 0; i < slices.size(); i++) {
    Cr2Slice slice = slices[i];
    try {
      LJpegPlain *l = new LJpegPlain(mFile, mRaw);
      l->addSlices(s_width);
      l->mUseBigtable = true;
      l->mCanonFlipDim = flipDims;
      l->mCanonDoubleHeight = doubleHeight;
      l->mWrappedCr2Slices = wrappedCr2Slices;
      l->startDecoder(slice.offset, slice.count, 0, offY);
      delete l;
    } catch (RawDecoderException &e) {
      if (i == 0)
        throw;
      // These may just be single slice error - store the error and move on
      mRaw->setError(e.what());
    } catch (IOException &e) {
      // Let's try to ignore this - it might be truncated data, so something might be useful.
      mRaw->setError(e.what());
    }
    offY += slice.w;
  }

  if (mRaw->metadata.subsampling.x > 1 || mRaw->metadata.subsampling.y > 1)
    sRawInterpolate();

  return mRaw;
}
Example #8
bool CaptureFlycap::convertFrame(const RawImage & src,
                           RawImage & target,
                           ColorFormat output_fmt,
                           bool debayer,
                           dc1394color_filter_t bayer_format,
                           dc1394bayer_method_t bayer_method,
                           int y16bits) {
  mutex.lock();

  int width = v_width->getInt();
  int height = v_height->getInt();

  ColorFormat src_fmt=src.getColorFormat();
  if (target.getData()==0) {
    //allocate target, if it does not exist yet
    target.allocate(output_fmt,src.getWidth(),src.getHeight());
  } else {
    target.ensure_allocation(output_fmt,src.getWidth(),src.getHeight());
  }
  target.setTime(src.getTime());
  if (output_fmt==src_fmt) {
    //just do a memcpy
    memcpy(target.getData(),src.getData(),src.getNumBytes());
  } else {
    //do some more fancy conversion
    if ((src_fmt==COLOR_MONO8 || src_fmt==COLOR_RAW8) && output_fmt==COLOR_RGB8) {
      //check whether to debayer or simply average to a grey rgb image
      if (debayer) {
        //de-bayer
        if ( dc1394_bayer_decoding_8bit( src.getData(), target.getData(), src.getWidth(), src.getHeight(), bayer_format, bayer_method) != DC1394_SUCCESS ) {
          mutex.unlock();
          return false;
        }
      } else {
        dc1394_convert_to_RGB8(src.getData(),target.getData(), width, height, 0,
                       DC1394_COLOR_CODING_MONO8, 8);
        //Conversions::y2rgb (src.getData(), target.getData(), src.getNumPixels());
      }
    } else if ((src_fmt==COLOR_MONO16 || src_fmt==COLOR_RAW16)) {
      //check whether to debayer or simply average to a grey rgb image
      if (debayer && output_fmt==COLOR_RGB16) {
        //de-bayer
        if ( dc1394_bayer_decoding_16bit( (uint16_t *)src.getData(), (uint16_t *)target.getData(), src.getWidth(), src.getHeight(), bayer_format, bayer_method, y16bits) != DC1394_SUCCESS ) {
          fprintf(stderr,"Error in 16bit Bayer Conversion");

            mutex.unlock();
          return false;
        }
      } else if (debayer==false && output_fmt==COLOR_RGB8) {

       dc1394_convert_to_RGB8(src.getData(),target.getData(), width, height, 0,
                       ((src_fmt==COLOR_MONO16) ? DC1394_COLOR_CODING_MONO16 : DC1394_COLOR_CODING_RAW16), y16bits);
        //Conversions::y162rgb (src.getData(), target.getData(), src.getNumPixels(), y16bits);
      } else {
      fprintf(stderr,"Cannot copy and convert frame...unknown conversion selected from: %s to %s\n",Colors::colorFormatToString(src_fmt).c_str(),Colors::colorFormatToString(output_fmt).c_str());
      mutex.unlock();
      return false;
      }
    } else if (src_fmt==COLOR_YUV411 && output_fmt==COLOR_RGB8) {
      dc1394_convert_to_RGB8(src.getData(),target.getData(), width, height, 0,
                       DC1394_COLOR_CODING_YUV411, 8);
      //Conversions::uyyvyy2rgb (src.getData(), target.getData(), src.getNumPixels());
    } else if (src_fmt==COLOR_YUV422_UYVY && output_fmt==COLOR_RGB8) {
        dc1394_convert_to_RGB8(src.getData(),target.getData(), width, height, DC1394_BYTE_ORDER_UYVY,
                       DC1394_COLOR_CODING_YUV422, 8);
    } else if (src_fmt==COLOR_YUV422_YUYV && output_fmt==COLOR_RGB8) {
        dc1394_convert_to_RGB8(src.getData(),target.getData(), width, height, DC1394_BYTE_ORDER_YUYV,
                       DC1394_COLOR_CODING_YUV422, 8);
    } else if (src_fmt==COLOR_YUV444 && output_fmt==COLOR_RGB8) {
      dc1394_convert_to_RGB8(src.getData(),target.getData(), width, height, 0,
                       DC1394_COLOR_CODING_YUV444, 8);
    } else {
      fprintf(stderr,"Cannot copy and convert frame...unknown conversion selected from: %s to %s\n",Colors::colorFormatToString(src_fmt).c_str(),Colors::colorFormatToString(output_fmt).c_str());
      mutex.unlock();
      return false;
    }
  }

  mutex.unlock();
  return true;
}