Example #1
0
ImageOrientation ImageDecoder::frameOrientationAtIndex(size_t index) const
{
    // Copy the CG property dictionary for this frame; if it is unavailable
    // we cannot determine an orientation, so report the default one.
    auto frameProperties = adoptCF(CGImageSourceCopyPropertiesAtIndex(m_nativeDecoder.get(), index, imageSourceOptions().get()));
    if (frameProperties)
        return orientationFromProperties(frameProperties.get());

    return ImageOrientation();
}
Example #2
0
ImageOrientation BitmapImage::frameOrientationAtIndex(size_t index)
{
    // Make sure the frame's metadata has been decoded before consulting it.
    if (!ensureFrameIsCached(index, CacheMetadataOnly))
        return ImageOrientation();

    // Prefer the cached per-frame orientation; otherwise ask the source.
    const auto& frame = m_frames[index];
    return frame.m_haveMetadata ? frame.m_orientation : m_source.orientationAtIndex(index);
}
Example #3
0
// Reads the EXIF orientation (kCGImagePropertyOrientation) out of a CG image
// property dictionary. Returns the default orientation when the property is
// absent or cannot be converted to an int.
static ImageOrientation orientationFromProperties(CFDictionaryRef imageProperties)
{
    ASSERT(imageProperties);
    CFNumberRef orientationProperty = (CFNumberRef)CFDictionaryGetValue(imageProperties, kCGImagePropertyOrientation);
    if (!orientationProperty)
        return ImageOrientation();

    // CFNumberGetValue() returns false when the conversion is lossy or fails;
    // previously exifValue was read uninitialized in that case. Treat a failed
    // conversion as "no orientation information".
    int exifValue = 0;
    if (!CFNumberGetValue(orientationProperty, kCFNumberIntType, &exifValue))
        return ImageOrientation();

    return ImageOrientation::fromEXIFValue(exifValue);
}
Example #4
0
// Scans the saved JPEG markers for an EXIF block and extracts the TIFF
// orientation tag (0x112). Returns the default orientation when no valid
// EXIF/TIFF orientation entry is found.
static ImageOrientation readImageOrientation(jpeg_decompress_struct* info)
{
    // The JPEG decoder looks at EXIF metadata.
    // FIXME: Possibly implement XMP and IPTC support.
    const unsigned orientationTag = 0x112;
    const unsigned shortType = 3; // TIFF datatype SHORT (16-bit unsigned).
    for (jpeg_saved_marker_ptr marker = info->marker_list; marker; marker = marker->next) {
        bool isBigEndian;
        unsigned ifdOffset;
        // checkExifHeader() validates the marker and reports the TIFF byte
        // order plus the offset of the first IFD (relative to the TIFF data).
        if (!checkExifHeader(marker, isBigEndian, ifdOffset))
            continue;
        const unsigned offsetToTiffData = 6; // Account for 'Exif\0<fill byte>' header.
        // Reject markers too small to hold the header, or whose IFD offset
        // points past the end of the marker data (unsigned-safe: the first
        // clause guarantees data_length >= offsetToTiffData here).
        if (marker->data_length < offsetToTiffData || ifdOffset >= marker->data_length - offsetToTiffData)
            continue;
        ifdOffset += offsetToTiffData;

        // The jpeg exif container format contains a tiff block for metadata.
        // A tiff image file directory (ifd) consists of a uint16_t describing
        // the number of ifd entries, followed by that many entries.
        // When touching this code, it's useful to look at the tiff spec:
        // http://partners.adobe.com/public/developer/en/tiff/TIFF6.pdf
        JOCTET* ifd = marker->data + ifdOffset;
        JOCTET* end = marker->data + marker->data_length;
        // Need at least the 2-byte entry count before reading it.
        if (end - ifd < 2)
            continue;
        unsigned tagCount = readUint16(ifd, isBigEndian);
        ifd += 2; // Skip over the uint16 that was just read.

        // Every ifd entry is 2 bytes of tag, 2 bytes of contents datatype,
        // 4 bytes of number-of-elements, and 4 bytes of either offset to the
        // tag data, or if the data is small enough, the inlined data itself.
        const int ifdEntrySize = 12;
        // The `end - ifd >= ifdEntrySize` condition bounds-checks every entry,
        // so a lying tagCount cannot drive reads past the marker data.
        for (unsigned i = 0; i < tagCount && end - ifd >= ifdEntrySize; ++i, ifd += ifdEntrySize) {
            unsigned tag = readUint16(ifd, isBigEndian);
            unsigned type = readUint16(ifd + 2, isBigEndian);
            unsigned count = readUint32(ifd + 4, isBigEndian);
            // A single SHORT fits inline in the value field at offset 8;
            // readUint16 applies the correct byte order for either endianness.
            if (tag == orientationTag && type == shortType && count == 1)
                return ImageOrientation::fromEXIFValue(readUint16(ifd + 8, isBigEndian));
        }
    }

    return ImageOrientation();
}
Example #5
0
// Stub for ports without EXIF-orientation support: logs via notImplemented()
// and always reports the default (identity) orientation.
ImageOrientation ImageDecoder::frameOrientationAtIndex(size_t index) const
{
    notImplemented();
    return ImageOrientation();
}
Example #6
0
/** Compute a dense SIFT feature vector for a grayscale image.
 *  @param gray_im A grayscale image of param.image_pixel bytes.
 *  @param[out] sift_feature Output buffer; must hold
 *              (number of sampled patches) * param.patch_dims doubles.
 *
 *  NOTE(review): the scratch buffers use raw new[]/delete[]; they leak if any
 *  callee throws. Prefer std::vector<double> once <vector> is available in
 *  this translation unit.
 */
void SIFT::CalcSIFT(BYTE* gray_im, double* sift_feature)
{
  // Convert to double and find the maximum intensity. The epsilon seed keeps
  // the subsequent division well-defined for an all-black image.
  double* lf_gray_im = new double[param.image_pixel];
  double max = 0.000001;
  for (int pt = 0; pt < param.image_pixel; pt++)
  {
	  lf_gray_im[pt] = gray_im[pt];
	  if (lf_gray_im[pt] > max)
		  max = lf_gray_im[pt];
  }

  // Normalize intensities into [0, 1].
  for (int pt = 0; pt < param.image_pixel; pt++)
  {
	  lf_gray_im[pt] = lf_gray_im[pt] / max;
  }

  // Per-angle orientation responses, then the spatial convolution of them.
  double* im_orientation = new double[param.image_pixel * param.angle_nums];
  double* conv_im = new double[param.image_pixel * param.angle_nums];
  memset(conv_im, 0, param.image_pixel * param.angle_nums * sizeof(double));

  ImageOrientation(lf_gray_im, im_orientation);
  ConvImage(im_orientation, conv_im);

  // Generate denseSIFT feature vector.
  double* patch_feature = new double[param.patch_dims];
  int patch_cnt = 0;

  // Sliding windows on overlapping patches. (location_x, location_y) are
  // patch centroids, stepped by grid_spacing.
  for (int location_x = param.patch_size / 2; location_x <= param.image_height - (param.patch_size / 2); location_x += param.grid_spacing)
  {
	  for (int location_y = param.patch_size / 2; location_y <= param.image_width - (param.patch_size / 2); location_y += param.grid_spacing)
	  {
		  memset(patch_feature, 0, param.patch_dims * sizeof(double));

		  double l2_norm = 0.000001; // Epsilon guards the division below.
		  int Point_cnt = 0;

		  // Sample the patch on a sample_pixel grid around the centroid.
		  for (int p_x = -param.patch_size / 2; p_x <= param.patch_size / 2 - param.sample_pixel; p_x += param.sample_pixel)
		  {
			  for (int p_y = -param.patch_size / 2; p_y <= param.patch_size / 2 - param.sample_pixel; p_y += param.sample_pixel)
			  {
				  int i = location_x + p_x;
				  int j = location_y + p_y;

				  // conv_im is indexed per angle plane; the plane itself
				  // appears to be column-major (j * image_height + i) —
				  // TODO(review): confirm against ConvImage's layout.
				  for (int index = 0; index < param.angle_nums; index++)
				  {
					  double response = conv_im[index * param.image_pixel + j * param.image_height + i];
					  patch_feature[Point_cnt] = response;
					  // Square by multiplication instead of pow(x, 2): pow is a
					  // general transcendental and far slower in this hot loop.
					  l2_norm += response * response;
					  Point_cnt += 1;
				  }
			  }
		  }

		  // Patch-wise L2 normalization.
		  double norm = 1.0 / sqrt(l2_norm);
		  for (int pt = 0; pt < param.patch_dims; pt++)
		  {
			  patch_feature[pt] = patch_feature[pt] * norm;
		  }

		  memcpy(&sift_feature[patch_cnt * param.patch_dims], patch_feature, param.patch_dims * sizeof(double));
		  patch_cnt += 1;
	  }
  }

  delete[] lf_gray_im;
  delete[] im_orientation;
  delete[] conv_im;
  delete[] patch_feature;
}