Example #1
bool
WebpInput::open (const std::string &name, ImageSpec &spec)
{
    m_filename = name;

    m_file = Filesystem::fopen(m_filename, "rb");
    if (!m_file)
    {
        error ("Could not open file \"%s\"", m_filename.c_str());
        return false;
    }

    fseek (m_file, 0, SEEK_END);
    m_image_size = ftell(m_file);
    fseek (m_file, 0, SEEK_SET);

    std::vector<uint8_t> encoded_image;
    encoded_image.resize(m_image_size, 0);
    size_t numRead = fread(&encoded_image[0], sizeof(uint8_t), encoded_image.size(), m_file);
    if (numRead != encoded_image.size()) {
    	error ("Read failure for \"%s\" (expected %d bytes, read %d)",
               m_filename, encoded_image.size(), numRead);
    	close ();
    	return false;
    }

    int width = 0, height = 0;
    if(!WebPGetInfo(&encoded_image[0], encoded_image.size(), &width, &height))
    {
        error ("%s is not a WebP image file", m_filename.c_str());
        close();
        return false;
    }

    const int CHANNEL_NUM = 4;
    m_scanline_size = width * CHANNEL_NUM;
    m_spec = ImageSpec(width, height, CHANNEL_NUM, TypeDesc::UINT8);
    spec = m_spec;

    if (!(m_decoded_image = WebPDecodeRGBA(&encoded_image[0], encoded_image.size(), &m_spec.width, &m_spec.height)))
    {
        error ("Couldn't decode %s", m_filename.c_str());
        close();
        return false;
    }
    return true;
}
Example #2
void
Tile::parse_images(const lisp::Lisp& images_lisp)
{
  const lisp::Lisp* list = &images_lisp;
  while(list) {
    const lisp::Lisp* cur = list->get_car();
    if(cur->get_type() == lisp::Lisp::TYPE_STRING) {
      std::string file;
      cur->get(file);
      imagespecs.push_back(ImageSpec(file, Rect(0, 0, 0, 0)));
    } else if(cur->get_type() == lisp::Lisp::TYPE_CONS && 
        cur->get_car()->get_type() == lisp::Lisp::TYPE_SYMBOL) {
      const lisp::Lisp* ptr = cur->get_cdr();

      std::string file;
      float x = 0, y = 0, w = 0, h = 0;
      ptr->get_car()->get(file); ptr = ptr->get_cdr();
      ptr->get_car()->get(x); ptr = ptr->get_cdr();
      ptr->get_car()->get(y); ptr = ptr->get_cdr();
      ptr->get_car()->get(w); ptr = ptr->get_cdr();
      ptr->get_car()->get(h);
      imagespecs.push_back(ImageSpec(file, Rect(x, y, x+w, y+h)));
    } else {
      log_warning << "Expected string or list in images tag" << std::endl;
      // fall through to advance the list; a 'continue' here would skip the
      // list = list->get_cdr() below and spin forever on malformed input
    }
    
    list = list->get_cdr();
  }
}
Example #3
int main (int argc, char *argv[])
{
#if !defined(NDEBUG) || defined(OIIO_CI) || defined(OIIO_CODECOV)
    // For the sake of test time, reduce the default iterations for DEBUG,
    // CI, and code coverage builds. Explicit use of --iters or --trials
    // will override this, since it comes before the getargs() call.
    iterations /= 10;
    ntrials = 1;
#endif

    getargs (argc, argv);

    // Initialize
    imgA.reset (ImageSpec (xres, yres, channels, TypeDesc::FLOAT));
    imgB.reset (ImageSpec (xres, yres, channels, TypeDesc::FLOAT));
    imgR.reset (ImageSpec (xres, yres, channels, TypeDesc::FLOAT));
    float red[3]  = { 1, 0, 0 };
    float green[3] = { 0, 1, 0 };
    float blue[3]  = { 0, 0, 1 };
    float black[3] = { 0, 0, 0 };
    ImageBufAlgo::fill (imgA, red, green, red, green);
    ImageBufAlgo::fill (imgB, blue, blue, black, black);
    // imgA.write ("A.exr");
    // imgB.write ("B.exr");

    test_compute ();

    return unit_test_failures;
}
Example #4
bool ImageManager::file_load_image_generic(Image *img, ImageInput **in, int &width, int &height, int &depth, int &components)
{
	if(img->filename == "")
		return false;

	if(!img->builtin_data) {
		/* load image from file through OIIO */
		*in = ImageInput::create(img->filename);

		if(!*in)
			return false;

		ImageSpec spec = ImageSpec();
		ImageSpec config = ImageSpec();

		if(img->use_alpha == false)
			config.attribute("oiio:UnassociatedAlpha", 1);

		if(!(*in)->open(img->filename, spec, config)) {
			delete *in;
			*in = NULL;
			return false;
		}

		width = spec.width;
		height = spec.height;
		depth = spec.depth;
		components = spec.nchannels;
	}
	else {
		/* load image using builtin images callbacks */
		if(!builtin_image_info_cb || !builtin_image_pixels_cb)
			return false;

		bool is_float;
		builtin_image_info_cb(img->filename, img->builtin_data, is_float, width, height, depth, components);
	}

	/* we only handle certain number of components */
	if(!(components >= 1 && components <= 4)) {
		if(*in) {
			(*in)->close();
			delete *in;
			*in = NULL;
		}

		return false;
	}

	return true;
}
Example #5
bool
HdrInput::seek_subimage(int subimage, int miplevel)
{
    // HDR doesn't support multiple subimages or mipmaps
    if (subimage != 0 || miplevel != 0)
        return false;

    // Skip the hard work if we're already on the requested subimage
    if (subimage == current_subimage()) {
        return true;
    }

    close();

    // Check that file exists and can be opened
    m_fd = Filesystem::fopen(m_filename, "rb");
    if (m_fd == NULL) {
        error("Could not open file \"%s\"", m_filename.c_str());
        return false;
    }

    rgbe_header_info h;
    int width, height;
    int r = RGBE_ReadHeader(m_fd, &width, &height, &h, rgbe_error);
    if (r != RGBE_RETURN_SUCCESS) {
        error("%s", rgbe_error);
        close();
        return false;
    }

    m_spec = ImageSpec(width, height, 3, TypeDesc::FLOAT);

    if (h.valid & RGBE_VALID_GAMMA) {
        // Round gamma to the nearest hundredth to prevent stupid
        // precision choices and make it easier for apps to make
        // decisions based on known gamma values. For example, you want
        // 2.2, not 2.19998.
        float g = float(1.0 / h.gamma);
        g       = roundf(100.0 * g) / 100.0f;
        m_spec.attribute("oiio:Gamma", g);
        if (g == 1.0f)
            m_spec.attribute("oiio:ColorSpace", "linear");
        else
            m_spec.attribute("oiio:ColorSpace",
                             Strutil::sprintf("GammaCorrected%.2g", g));
    } else {
        // Presume linear color space
        m_spec.attribute("oiio:ColorSpace", "linear");
    }
    if (h.valid & RGBE_VALID_ORIENTATION)
        m_spec.attribute("Orientation", h.orientation);

    // FIXME -- should we do anything about exposure, software,
    // pixaspect, primaries?  (N.B. rgbe.c doesn't even handle most of them)

    m_subimage      = subimage;
    m_next_scanline = 0;
    return true;
}
Example #6
Doc* Docs::add(int width, int height, doc::ColorMode colorMode, int ncolors)
{
  std::unique_ptr<Doc> doc(
    new Doc(Sprite::createBasicSprite(ImageSpec(colorMode, width, height), ncolors)));
  doc->setFilename("Sprite");
  doc->setContext(m_ctx); // Change the document context to add the doc in this collection

  return doc.release();
}
Example #7
void
ImageBuf::clear ()
{
    m_name.clear ();
    m_fileformat.clear ();
    m_nsubimages = 0;
    m_current_subimage = -1;
    m_current_miplevel = -1;
    m_spec = ImageSpec ();
    m_nativespec = ImageSpec ();
    std::vector<char>().swap (m_pixels);  // clear it with deallocation
    m_localpixels = NULL;
    m_clientpixels = false;
    m_spec_valid = false;
    m_pixels_valid = false;
    m_badfile = false;
    m_orientation = 1;
    m_pixelaspect = 1;
    m_deepdata.free ();
}
Example #8
int main (int argc, char *argv[])
{
    getargs (argc, argv);

    // Initialize
    imgA.reset (ImageSpec (xres, yres, channels, TypeDesc::FLOAT));
    imgB.reset (ImageSpec (xres, yres, channels, TypeDesc::FLOAT));
    imgR.reset (ImageSpec (xres, yres, channels, TypeDesc::FLOAT));
    float red[3]  = { 1, 0, 0 };
    float green[3] = { 0, 1, 0 };
    float blue[3]  = { 0, 0, 1 };
    float black[3] = { 0, 0, 0 };
    ImageBufAlgo::fill (imgA, red, green, red, green);
    ImageBufAlgo::fill (imgB, blue, blue, black, black);
    // imgA.write ("A.exr");
    // imgB.write ("B.exr");

    test_compute ();

    return unit_test_failures;
}
Example #9
bool
OpenEXRInput::open (const std::string &name, ImageSpec &newspec)
{
    // Quick check to reject non-exr files
    if (! Filesystem::is_regular (name)) {
        error ("Could not open file \"%s\"", name.c_str());
        return false;
    }
    bool tiled;
    if (! Imf::isOpenExrFile (name.c_str(), tiled)) {
        error ("\"%s\" is not an OpenEXR file", name.c_str());
        return false;
    }

    pvt::set_exr_threads ();

    m_spec = ImageSpec(); // Clear everything with default constructor
    
    try {
        m_input_stream = new OpenEXRInputStream (name.c_str());
    } catch (const std::exception &e) {
        m_input_stream = NULL;
        error ("OpenEXR exception: %s", e.what());
        return false;
    } catch (...) {   // catch-all for edge cases or compiler bugs
        m_input_stream = NULL;
        error ("OpenEXR exception: unknown");
        return false;
    }

    try {
        m_input_multipart = new Imf::MultiPartInputFile (*m_input_stream);
    } catch (const std::exception &e) {
        delete m_input_stream;
        m_input_stream = NULL;
        error ("OpenEXR exception: %s", e.what());
        return false;
    } catch (...) {   // catch-all for edge cases or compiler bugs
        delete m_input_stream;
        m_input_stream = NULL;
        error ("OpenEXR exception: unknown");
        return false;
    }

    m_nsubimages = m_input_multipart->parts();
    m_parts.resize (m_nsubimages);
    m_subimage = -1;
    m_miplevel = -1;
    bool ok = seek_subimage (0, 0, newspec);
    if (! ok)
        close ();
    return ok;
}
Example #10
bool
HdrInput::seek_subimage (int subimage, int miplevel, ImageSpec &newspec)
{
    // HDR doesn't support multiple subimages or mipmaps
    if (subimage != 0 || miplevel != 0)
        return false;

    // Skip the hard work if we're already on the requested subimage
    if (subimage == current_subimage()) {
        newspec = spec();
        return true;
    }

    close();

    // Check that file exists and can be opened
    m_fd = Filesystem::fopen (m_filename, "rb");
    if (m_fd == NULL) {
        error ("Could not open file \"%s\"", m_filename.c_str());
        return false;
    }

    rgbe_header_info h;
    int width, height;
    int r = RGBE_ReadHeader (m_fd, &width, &height, &h, rgbe_error);
    if (r != RGBE_RETURN_SUCCESS) {
        error ("%s", rgbe_error);
        close ();
        return false;
    }

    m_spec = ImageSpec (width, height, 3, TypeDesc::FLOAT);

    if (h.valid & RGBE_VALID_GAMMA)
        m_spec.attribute ("oiio:Gamma", h.gamma);
    if (h.valid & RGBE_VALID_ORIENTATION)
        m_spec.attribute ("Orientation", h.orientation);

    // FIXME -- should we do anything about exposure, software,
    // pixaspect, primaries?  (N.B. rgbe.c doesn't even handle most of them)

    m_subimage = subimage;
    m_next_scanline = 0;
    newspec = m_spec;
    return true;
}
Example #11
bool
ZfileInput::open (const std::string &name, ImageSpec &newspec)
{
    m_filename = name;

    FILE *fd = Filesystem::fopen (name, "rb");
    m_gz = (fd) ? gzdopen (fileno (fd), "rb") : NULL;
    if (! m_gz) {
        if (fd)
            fclose (fd);
        error ("Could not open file \"%s\"", name.c_str());
        return false;
    }

    ZfileHeader header;
    ASSERT (sizeof(header) == 136);
    gzread (m_gz, &header, sizeof(header));

    if (header.magic != zfile_magic && header.magic != zfile_magic_endian) {
        error ("Not a valid Zfile");
        return false;
    }

    m_swab = (header.magic == zfile_magic_endian);
    if (m_swab) {
        swap_endian (&header.width);
        swap_endian (&header.height);
        swap_endian ((float *)&header.worldtoscreen, 16);
        swap_endian ((float *)&header.worldtocamera, 16);
    }

    m_spec = ImageSpec (header.width, header.height, 1, TypeDesc::FLOAT);
    if (m_spec.channelnames.size() == 0)
        m_spec.channelnames.emplace_back("z");
    else
        m_spec.channelnames[0] = "z";
    m_spec.z_channel = 0;

    m_spec.attribute ("worldtoscreen", TypeDesc::TypeMatrix,
                      (float *)&header.worldtoscreen);
    m_spec.attribute ("worldtocamera", TypeDesc::TypeMatrix,
                      (float *)&header.worldtocamera);

    newspec = spec ();
    return true;
}
Example #12
bool
ImageBufAlgo::histogram_draw (ImageBuf &R,
                              const std::vector<imagesize_t> &histogram)
{
    // Fail if there are no bins to draw.
    int bins = histogram.size();
    if (bins == 0) {
        R.error ("There are no bins to draw, the histogram is empty");
        return false;
    }

    // Check R and modify it if needed.
    int height = R.spec().height;
    if (R.spec().format != TypeDesc::TypeFloat || R.nchannels() != 1 ||
        R.spec().width != bins) {
        ImageSpec newspec = ImageSpec (bins, height, 1, TypeDesc::FLOAT);
        R.reset ("dummy", newspec);
    }

    // Fill output image R with white color.
    ImageBuf::Iterator<float, float> r (R);
    for ( ; ! r.done(); ++r)
        r[0] = 1;

    // Draw histogram left->right, bottom->up.
    imagesize_t max = *std::max_element (histogram.begin(), histogram.end());
    for (int b = 0; b < bins; b++) {
        int bin_height = (int) ((float)histogram[b]/(float)max*height + 0.5f);
        if (bin_height != 0) {
            // Draw one bin at column b.
            for (int j = 1; j <= bin_height; j++) {
                int row = height - j;
                r.pos (b, row);
                r[0] = 0;
            }
        }
    }
    return true;
}
Example #13
bool
FitsInput::set_spec_info ()
{
    keys.clear ();
    // FITS spec doesn't say anything about color space or
    // number of channels, so we read all images as if they
    // all were one-channel images
    m_spec = ImageSpec(0, 0, 1, TypeDesc::UNKNOWN);

    // reading info about current subimage
    if (! read_fits_header ())
        return false;

    // we don't deal with one dimension images
    // it's some kind of spectral data
    if (! m_spec.width || ! m_spec.height) {
        m_spec.width = m_spec.full_width = 0;
        m_spec.height = m_spec.full_height = 0;
    }

    // now we can get the current position in the file
    // this is the start of the image data
    // we will need it in the read_native_scanline method
    fgetpos(m_fd, &m_filepos);

    if (m_bitpix == 8)
        m_spec.set_format (TypeDesc::UCHAR);
    else if (m_bitpix == 16)
        m_spec.set_format (TypeDesc::USHORT);
    else if (m_bitpix == 32)
        m_spec.set_format (TypeDesc::UINT);
    else if (m_bitpix == -32)
        m_spec.set_format (TypeDesc::FLOAT);
    else if (m_bitpix == -64)
        m_spec.set_format (TypeDesc::DOUBLE);
    return true;
}
Example #14
void
OpenEXRInput::PartInfo::parse_header (const Imf::Header *header)
{
    if (initialized)
        return;

    ASSERT (header);
    spec = ImageSpec();

    top_datawindow = header->dataWindow();
    top_displaywindow = header->displayWindow();
    spec.x = top_datawindow.min.x;
    spec.y = top_datawindow.min.y;
    spec.z = 0;
    spec.width  = top_datawindow.max.x - top_datawindow.min.x + 1;
    spec.height = top_datawindow.max.y - top_datawindow.min.y + 1;
    spec.depth = 1;
    topwidth = spec.width;      // Save top-level mipmap dimensions
    topheight = spec.height;
    spec.full_x = top_displaywindow.min.x;
    spec.full_y = top_displaywindow.min.y;
    spec.full_z = 0;
    spec.full_width  = top_displaywindow.max.x - top_displaywindow.min.x + 1;
    spec.full_height = top_displaywindow.max.y - top_displaywindow.min.y + 1;
    spec.full_depth = 1;
    spec.tile_depth = 1;

    if (header->hasTileDescription()) {
        const Imf::TileDescription &td (header->tileDescription());
        spec.tile_width = td.xSize;
        spec.tile_height = td.ySize;
        levelmode = td.mode;
        roundingmode = td.roundingMode;
        if (levelmode == Imf::MIPMAP_LEVELS)
            nmiplevels = numlevels (std::max(topwidth,topheight), roundingmode);
        else if (levelmode == Imf::RIPMAP_LEVELS)
            nmiplevels = numlevels (std::max(topwidth,topheight), roundingmode);
        else
            nmiplevels = 1;
    } else {
        spec.tile_width = 0;
        spec.tile_height = 0;
        levelmode = Imf::ONE_LEVEL;
        nmiplevels = 1;
    }
    query_channels (header);   // also sets format

#ifdef USE_OPENEXR_VERSION2
    spec.deep = Strutil::istarts_with (header->type(), "deep");
#endif

    // Unless otherwise specified, exr files are assumed to be linear.
    spec.attribute ("oiio:ColorSpace", "Linear");

    if (levelmode != Imf::ONE_LEVEL)
        spec.attribute ("openexr:roundingmode", roundingmode);

    const Imf::EnvmapAttribute *envmap;
    envmap = header->findTypedAttribute<Imf::EnvmapAttribute>("envmap");
    if (envmap) {
        cubeface = (envmap->value() == Imf::ENVMAP_CUBE);
        spec.attribute ("textureformat", cubeface ? "CubeFace Environment" : "LatLong Environment");
        // OpenEXR conventions for env maps
        if (! cubeface)
            spec.attribute ("oiio:updirection", "y");
        spec.attribute ("oiio:sampleborder", 1);
        // FIXME - detect CubeFace Shadow?
    } else {
        cubeface = false;
        if (spec.tile_width && levelmode == Imf::MIPMAP_LEVELS)
            spec.attribute ("textureformat", "Plain Texture");
        // FIXME - detect Shadow
    }

    const Imf::CompressionAttribute *compressattr;
    compressattr = header->findTypedAttribute<Imf::CompressionAttribute>("compression");
    if (compressattr) {
        const char *comp = NULL;
        switch (compressattr->value()) {
        case Imf::NO_COMPRESSION    : comp = "none"; break;
        case Imf::RLE_COMPRESSION   : comp = "rle"; break;
        case Imf::ZIPS_COMPRESSION  : comp = "zips"; break;
        case Imf::ZIP_COMPRESSION   : comp = "zip"; break;
        case Imf::PIZ_COMPRESSION   : comp = "piz"; break;
        case Imf::PXR24_COMPRESSION : comp = "pxr24"; break;
#ifdef IMF_B44_COMPRESSION
            // The enum Imf::B44_COMPRESSION is not defined in older versions
            // of OpenEXR, and there are no explicit version numbers in the
            // headers.  BUT this other related #define is present only in
            // the newer version.
        case Imf::B44_COMPRESSION   : comp = "b44"; break;
        case Imf::B44A_COMPRESSION  : comp = "b44a"; break;
#endif
#if defined(OPENEXR_VERSION_MAJOR) && \
    (OPENEXR_VERSION_MAJOR*10000+OPENEXR_VERSION_MINOR*100+OPENEXR_VERSION_PATCH) >= 20200
        case Imf::DWAA_COMPRESSION  : comp = "dwaa"; break;
        case Imf::DWAB_COMPRESSION  : comp = "dwab"; break;
#endif
        default:
            break;
        }
        if (comp)
            spec.attribute ("compression", comp);
    }

    for (Imf::Header::ConstIterator hit = header->begin();
             hit != header->end();  ++hit) {
        const Imf::IntAttribute *iattr;
        const Imf::FloatAttribute *fattr;
        const Imf::StringAttribute *sattr;
        const Imf::M44fAttribute *mattr;
        const Imf::V3fAttribute *v3fattr;
        const Imf::V3iAttribute *v3iattr;
        const Imf::V2fAttribute *v2fattr;
        const Imf::V2iAttribute *v2iattr;
        const Imf::Box2iAttribute *b2iattr;
        const Imf::Box2fAttribute *b2fattr;
        const Imf::TimeCodeAttribute *tattr;
        const Imf::KeyCodeAttribute *kcattr;
#ifdef USE_OPENEXR_VERSION2
        const Imf::StringVectorAttribute *svattr;
#endif
        const char *name = hit.name();
        std::string oname = exr_tag_to_ooio_std[name];
        if (oname.empty())   // Empty string means skip this attrib
            continue;
//        if (oname == name)
//            oname = std::string(format_name()) + "_" + oname;
        const Imf::Attribute &attrib = hit.attribute();
        std::string type = attrib.typeName();
        if (type == "string" && 
            (sattr = header->findTypedAttribute<Imf::StringAttribute> (name)))
            spec.attribute (oname, sattr->value().c_str());
        else if (type == "int" && 
            (iattr = header->findTypedAttribute<Imf::IntAttribute> (name)))
            spec.attribute (oname, iattr->value());
        else if (type == "float" && 
            (fattr = header->findTypedAttribute<Imf::FloatAttribute> (name)))
            spec.attribute (oname, fattr->value());
        else if (type == "m44f" && 
            (mattr = header->findTypedAttribute<Imf::M44fAttribute> (name)))
            spec.attribute (oname, TypeDesc::TypeMatrix, &(mattr->value()));
        else if (type == "v3f" &&
                 (v3fattr = header->findTypedAttribute<Imf::V3fAttribute> (name)))
            spec.attribute (oname, TypeDesc::TypeVector, &(v3fattr->value()));
        else if (type == "v3i" &&
                 (v3iattr = header->findTypedAttribute<Imf::V3iAttribute> (name))) {
            TypeDesc v3 (TypeDesc::INT, TypeDesc::VEC3, TypeDesc::VECTOR);
            spec.attribute (oname, v3, &(v3iattr->value()));
        }
        else if (type == "v2f" &&
                 (v2fattr = header->findTypedAttribute<Imf::V2fAttribute> (name))) {
            TypeDesc v2 (TypeDesc::FLOAT,TypeDesc::VEC2);
            spec.attribute (oname, v2, &(v2fattr->value()));
        }
        else if (type == "v2i" &&
                 (v2iattr = header->findTypedAttribute<Imf::V2iAttribute> (name))) {
            TypeDesc v2 (TypeDesc::INT,TypeDesc::VEC2);
            spec.attribute (oname, v2, &(v2iattr->value()));
        }
#ifdef USE_OPENEXR_VERSION2
        else if (type == "stringvector" &&
            (svattr = header->findTypedAttribute<Imf::StringVectorAttribute> (name))) {
            std::vector<std::string> strvec = svattr->value();
            std::vector<ustring> ustrvec (strvec.size());
            for (size_t i = 0, e = strvec.size();  i < e;  ++i)
                ustrvec[i] = strvec[i];
            TypeDesc sv (TypeDesc::STRING, ustrvec.size());
            spec.attribute(oname, sv, &ustrvec[0]);
        }
#endif
        else if (type == "box2i" &&
                 (b2iattr = header->findTypedAttribute<Imf::Box2iAttribute> (name))) {
            TypeDesc bx (TypeDesc::INT, TypeDesc::VEC2, 2);
            spec.attribute (oname, bx, &b2iattr->value());
        }
        else if (type == "box2f" &&
                 (b2fattr = header->findTypedAttribute<Imf::Box2fAttribute> (name))) {
            TypeDesc bx (TypeDesc::FLOAT, TypeDesc::VEC2, 2);
            spec.attribute (oname, bx, &b2fattr->value());
        }
        else if (type == "timecode" &&
                 (tattr = header->findTypedAttribute<Imf::TimeCodeAttribute> (name))) {
            unsigned int timecode[2];
            timecode[0] = tattr->value().timeAndFlags(Imf::TimeCode::TV60_PACKING); //TV60 returns unchanged _time
            timecode[1] = tattr->value().userData();

            // Elevate "timeCode" to smpte:TimeCode
            if (oname == "timeCode")
                oname = "smpte:TimeCode";
            spec.attribute(oname, TypeDesc::TypeTimeCode, timecode);
        }
        else if (type == "keycode" &&
                 (kcattr = header->findTypedAttribute<Imf::KeyCodeAttribute> (name))) {
            const Imf::KeyCode *k = &kcattr->value();
            unsigned int keycode[7];
            keycode[0] = k->filmMfcCode();
            keycode[1] = k->filmType();
            keycode[2] = k->prefix();
            keycode[3] = k->count();
            keycode[4] = k->perfOffset();
            keycode[5] = k->perfsPerFrame();
            keycode[6] = k->perfsPerCount();

            // Elevate "keyCode" to smpte:KeyCode
            if (oname == "keyCode")
                oname = "smpte:KeyCode";
            spec.attribute(oname, TypeDesc::TypeKeyCode, keycode);
        }
        else {
#if 0
            std::cerr << "  unknown attribute " << type << ' ' << name << "\n";
#endif
        }
    }

#ifdef USE_OPENEXR_VERSION2
    // EXR "name" also gets passed along as "oiio:subimagename".
    if (header->hasName())
        spec.attribute ("oiio:subimagename", header->name());
#endif

    initialized = true;
}
Example #15
bool
OpenEXRInput::open (const std::string &name, ImageSpec &newspec)
{
    // Quick check to reject non-exr files
    bool tiled;
    if (! Imf::isOpenExrFile (name.c_str(), tiled))
        return false;

    pvt::set_exr_threads ();

    m_spec = ImageSpec(); // Clear everything with default constructor
    
    // Unless otherwise specified, exr files are assumed to be linear.
    m_spec.attribute ("oiio:ColorSpace", "Linear");
    
    try {
        if (tiled) {
            m_input_tiled = new Imf::TiledInputFile (name.c_str());
            m_header = &(m_input_tiled->header());
        } else {
            m_input_scanline = new Imf::InputFile (name.c_str());
            m_header = &(m_input_scanline->header());
        }
    }
    catch (const std::exception &e) {
        error ("OpenEXR exception: %s", e.what());
        return false;
    }
    if (! m_input_scanline && ! m_input_tiled) {
        error ("Unknown error opening EXR file");
        return false;
    }

    Imath::Box2i datawindow = m_header->dataWindow();
    m_spec.x = datawindow.min.x;
    m_spec.y = datawindow.min.y;
    m_spec.z = 0;
    m_spec.width = datawindow.max.x - datawindow.min.x + 1;
    m_spec.height = datawindow.max.y - datawindow.min.y + 1;
    m_spec.depth = 1;
    m_topwidth = m_spec.width;      // Save top-level mipmap dimensions
    m_topheight = m_spec.height;
    Imath::Box2i displaywindow = m_header->displayWindow();
    m_spec.full_x = displaywindow.min.x;
    m_spec.full_y = displaywindow.min.y;
    m_spec.full_z = 0;
    m_spec.full_width = displaywindow.max.x - displaywindow.min.x + 1;
    m_spec.full_height = displaywindow.max.y - displaywindow.min.y + 1;
    m_spec.full_depth = 1;
    if (tiled) {
        m_spec.tile_width = m_input_tiled->tileXSize();
        m_spec.tile_height = m_input_tiled->tileYSize();
    } else {
        m_spec.tile_width = 0;
        m_spec.tile_height = 0;
    }
    m_spec.tile_depth = 1;
    query_channels ();   // also sets format

    m_nsubimages = 1;
    if (tiled) {
        // FIXME: levelmode
        m_levelmode = m_input_tiled->levelMode();
        m_roundingmode = m_input_tiled->levelRoundingMode();
        if (m_levelmode == Imf::MIPMAP_LEVELS) {
            m_nmiplevels = m_input_tiled->numLevels();
            m_spec.attribute ("openexr:roundingmode", m_roundingmode);
        } else if (m_levelmode == Imf::RIPMAP_LEVELS) {
            m_nmiplevels = std::max (m_input_tiled->numXLevels(),
                                     m_input_tiled->numYLevels());
            m_spec.attribute ("openexr:roundingmode", m_roundingmode);
        } else {
            m_nmiplevels = 1;
        }
    } else {
        m_levelmode = Imf::ONE_LEVEL;
        m_nmiplevels = 1;
    }

    const Imf::EnvmapAttribute *envmap;
    envmap = m_header->findTypedAttribute<Imf::EnvmapAttribute>("envmap");
    if (envmap) {
        m_cubeface = (envmap->value() == Imf::ENVMAP_CUBE);
        m_spec.attribute ("textureformat", m_cubeface ? "CubeFace Environment" : "LatLong Environment");
        // OpenEXR conventions for env maps
        if (! m_cubeface)
            m_spec.attribute ("oiio:updirection", "y");
        m_spec.attribute ("oiio:sampleborder", 1);
        // FIXME - detect CubeFace Shadow?
    } else {
        m_cubeface = false;
        if (tiled && m_levelmode == Imf::MIPMAP_LEVELS)
            m_spec.attribute ("textureformat", "Plain Texture");
        // FIXME - detect Shadow
    }

    const Imf::CompressionAttribute *compressattr;
    compressattr = m_header->findTypedAttribute<Imf::CompressionAttribute>("compression");
    if (compressattr) {
        const char *comp = NULL;
        switch (compressattr->value()) {
        case Imf::NO_COMPRESSION    : comp = "none"; break;
        case Imf::RLE_COMPRESSION   : comp = "rle"; break;
        case Imf::ZIPS_COMPRESSION  : comp = "zips"; break;
        case Imf::ZIP_COMPRESSION   : comp = "zip"; break;
        case Imf::PIZ_COMPRESSION   : comp = "piz"; break;
        case Imf::PXR24_COMPRESSION : comp = "pxr24"; break;
#ifdef IMF_B44_COMPRESSION
            // The enum Imf::B44_COMPRESSION is not defined in older versions
            // of OpenEXR, and there are no explicit version numbers in the
            // headers.  BUT this other related #define is present only in
            // the newer version.
        case Imf::B44_COMPRESSION   : comp = "b44"; break;
        case Imf::B44A_COMPRESSION  : comp = "b44a"; break;
#endif
        default:
            break;
        }
        if (comp)
            m_spec.attribute ("compression", comp);
    }

    for (Imf::Header::ConstIterator hit = m_header->begin();
             hit != m_header->end();  ++hit) {
        const Imf::IntAttribute *iattr;
        const Imf::FloatAttribute *fattr;
        const Imf::StringAttribute *sattr;
        const Imf::M44fAttribute *mattr;
        const Imf::V3fAttribute *vattr;        
        const char *name = hit.name();
        std::string oname = exr_tag_to_ooio_std[name];
        if (oname.empty())   // Empty string means skip this attrib
            continue;
//        if (oname == name)
//            oname = std::string(format_name()) + "_" + oname;
        const Imf::Attribute &attrib = hit.attribute();
        std::string type = attrib.typeName();
        if (type == "string" && 
            (sattr = m_header->findTypedAttribute<Imf::StringAttribute> (name)))
            m_spec.attribute (oname, sattr->value().c_str());
        else if (type == "int" && 
            (iattr = m_header->findTypedAttribute<Imf::IntAttribute> (name)))
            m_spec.attribute (oname, iattr->value());
        else if (type == "float" && 
            (fattr = m_header->findTypedAttribute<Imf::FloatAttribute> (name)))
            m_spec.attribute (oname, fattr->value());
        else if (type == "m44f" && 
            (mattr = m_header->findTypedAttribute<Imf::M44fAttribute> (name)))
            m_spec.attribute (oname, TypeDesc::TypeMatrix, &(mattr->value()));
        else if (type == "v3f" &&
                 (vattr = m_header->findTypedAttribute<Imf::V3fAttribute> (name)))
            m_spec.attribute (oname, TypeDesc::TypeVector, &(vattr->value()));
        else {
#if 0
            std::cerr << "  unknown attribute " << type << ' ' << name << "\n";
#endif
        }
    }

    m_subimage = 0;
    m_miplevel = 0;
    newspec = m_spec;
    return true;
}
Example #16
bool
FFmpegInput::open (const std::string &name, ImageSpec &spec)
{
    static boost::once_flag init_flag = BOOST_ONCE_INIT;
    boost::call_once (&av_register_all, init_flag);
    const char *file_name = name.c_str();
    av_log_set_level (AV_LOG_FATAL);
    if (avformat_open_input (&m_format_context, file_name, NULL, NULL) != 0) // avformat_open_input allocs format_context
    {
        error ("\"%s\" could not open input", file_name);
        return false;
    }
    if (avformat_find_stream_info (m_format_context, NULL) < 0)
    {
        error ("\"%s\" could not find stream info", file_name);
        return false;
    }
    m_video_stream = -1;
    for (unsigned int i=0; i<m_format_context->nb_streams; i++) {
        if (m_format_context->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            if (m_video_stream < 0) {
                m_video_stream=i;
            }
            m_video_indexes.push_back (i); // needed for later use
            break;
        }
    }  
    if (m_video_stream == -1) {
        error ("\"%s\" could not find a valid videostream", file_name);
        return false;
    }
    m_codec_context = m_format_context->streams[m_video_stream]->codec; // codec context for videostream
    m_codec = avcodec_find_decoder (m_codec_context->codec_id);
    if (!m_codec) {
        error ("\"%s\" unsupported codec", file_name);
        return false;
    }
    if (avcodec_open2 (m_codec_context, m_codec, NULL) < 0) {
        error ("\"%s\" could not open codec", file_name);
        return false;
    }
    if (!strcmp (m_codec_context->codec->name, "mjpeg") ||
        !strcmp (m_codec_context->codec->name, "dvvideo")) {
        m_offset_time = false;
    }
    AVStream *stream = m_format_context->streams[m_video_stream];
    if (stream->r_frame_rate.num != 0 && stream->r_frame_rate.den != 0) {
        m_frame_rate = stream->r_frame_rate;
    }
    if (static_cast<int64_t> (m_format_context->duration) != AV_NOPTS_VALUE) {
        m_frames = static_cast<uint64_t> ((fps() * static_cast<double>(m_format_context->duration) / 
                                                   static_cast<uint64_t>(AV_TIME_BASE)));
    } else {
        m_frames = 1 << 29;
    }
    AVPacket pkt;
    if (!m_frames) {
        seek (0);
        av_init_packet (&pkt);
        av_read_frame (m_format_context, &pkt);
        uint64_t first_pts = pkt.pts;
        uint64_t max_pts = first_pts;
        seek (1 << 29);
        av_init_packet (&pkt);
        while (stream && av_read_frame (m_format_context, &pkt) >= 0) {
            uint64_t current_pts = static_cast<uint64_t> (av_q2d(stream->time_base) * (pkt.pts - first_pts) * fps());
            if (current_pts > max_pts) {
                max_pts = current_pts;
            }
        }
        m_frames = max_pts;
    }
    m_frame = av_frame_alloc();
    m_rgb_frame = av_frame_alloc();
    m_rgb_buffer.resize(
        avpicture_get_size (PIX_FMT_RGB24,
        m_codec_context->width,
        m_codec_context->height),
        0
    );
    AVPixelFormat pixFormat;
    switch (m_codec_context->pix_fmt) { // deprecation warning for YUV formats
        case AV_PIX_FMT_YUVJ420P:
            pixFormat = AV_PIX_FMT_YUV420P;
            break;
        case AV_PIX_FMT_YUVJ422P:
            pixFormat = AV_PIX_FMT_YUV422P;
            break;
        case AV_PIX_FMT_YUVJ444P:
            pixFormat = AV_PIX_FMT_YUV444P;
            break;
        case AV_PIX_FMT_YUVJ440P:
            pixFormat = AV_PIX_FMT_YUV440P;
            break;
        default:
            pixFormat = m_codec_context->pix_fmt;
            break;
    }
    m_sws_rgb_context = sws_getContext(
        m_codec_context->width,
        m_codec_context->height,
        pixFormat,
        m_codec_context->width,
        m_codec_context->height,
        PIX_FMT_RGB24,
        SWS_AREA,
        NULL,
        NULL,
        NULL
    );
    m_spec = ImageSpec (m_codec_context->width, m_codec_context->height, 3, TypeDesc::UINT8);
    AVDictionaryEntry *tag = NULL;
    while ((tag = av_dict_get (m_format_context->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
        m_spec.attribute (tag->key, tag->value);
    }
    m_spec.attribute ("fps", m_frame_rate.num / static_cast<float> (m_frame_rate.den));
    m_spec.attribute ("oiio:Movie", true);
    m_nsubimages = m_frames;
    spec = m_spec;
    return true;
}
Example #17
bool
FFmpegInput::open (const std::string &name, ImageSpec &spec)
{
    // Temporary workaround: refuse to open a file whose name does not
    // indicate that it's a movie file. This avoids the problem that ffmpeg
    // is willing to open tiff and other files better handled by other
    // plugins. The better long-term solution is to replace av_register_all
    // with our own function that registers only the formats that we want
    // this reader to handle. At some point, we will institute that superior
    // approach, but in the mean time, this is a quick solution that 90%
    // does the job.
    bool valid_extension = false;
    for (int i = 0; ffmpeg_input_extensions[i]; ++i)
        if (Strutil::ends_with (name, ffmpeg_input_extensions[i])) {
            valid_extension = true;
            break;
        }
    if (! valid_extension) {
        error ("\"%s\" could not open input", name);
        return false;
    }

    static boost::once_flag init_flag = BOOST_ONCE_INIT;
    boost::call_once (&av_register_all, init_flag);
    const char *file_name = name.c_str();
    av_log_set_level (AV_LOG_FATAL);
    if (avformat_open_input (&m_format_context, file_name, NULL, NULL) != 0) // avformat_open_input allocs format_context
    {
        error ("\"%s\" could not open input", file_name);
        return false;
    }
    if (avformat_find_stream_info (m_format_context, NULL) < 0)
    {
        error ("\"%s\" could not find stream info", file_name);
        return false;
    }
    m_video_stream = -1;
    for (unsigned int i=0; i<m_format_context->nb_streams; i++) {
        if (m_format_context->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            if (m_video_stream < 0) {
                m_video_stream=i;
            }
            m_video_indexes.push_back (i); // needed for later use
            break;
        }
    }  
    if (m_video_stream == -1) {
        error ("\"%s\" could not find a valid videostream", file_name);
        return false;
    }
    m_codec_context = m_format_context->streams[m_video_stream]->codec; // codec context for videostream
    m_codec = avcodec_find_decoder (m_codec_context->codec_id);
    if (!m_codec) {
        error ("\"%s\" unsupported codec", file_name);
        return false;
    }
    if (avcodec_open2 (m_codec_context, m_codec, NULL) < 0) {
        error ("\"%s\" could not open codec", file_name);
        return false;
    }
    if (!strcmp (m_codec_context->codec->name, "mjpeg") ||
        !strcmp (m_codec_context->codec->name, "dvvideo")) {
        m_offset_time = false;
    }
    AVStream *stream = m_format_context->streams[m_video_stream];
    if (stream->r_frame_rate.num != 0 && stream->r_frame_rate.den != 0) {
        m_frame_rate = stream->r_frame_rate;
    }
    if (static_cast<int64_t> (m_format_context->duration) != AV_NOPTS_VALUE) {
        m_frames = static_cast<uint64_t> ((fps() * static_cast<double>(m_format_context->duration) / 
                                                   static_cast<uint64_t>(AV_TIME_BASE)));
    } else {
        m_frames = 1 << 29;
    }
    AVPacket pkt;
    if (!m_frames) {
        seek (0);
        av_init_packet (&pkt);
        av_read_frame (m_format_context, &pkt);
        uint64_t first_pts = pkt.pts;
        uint64_t max_pts = first_pts;
        seek (1 << 29);
        av_init_packet (&pkt);
        while (stream && av_read_frame (m_format_context, &pkt) >= 0) {
            uint64_t current_pts = static_cast<uint64_t> (av_q2d(stream->time_base) * (pkt.pts - first_pts) * fps());
            if (current_pts > max_pts) {
                max_pts = current_pts;
            }
        }
        m_frames = max_pts;
    }
    m_frame = av_frame_alloc();
    m_rgb_frame = av_frame_alloc();
    m_rgb_buffer.resize(
        avpicture_get_size (PIX_FMT_RGB24,
        m_codec_context->width,
        m_codec_context->height),
        0
    );
    AVPixelFormat pixFormat;
    switch (m_codec_context->pix_fmt) { // deprecation warning for YUV formats
        case AV_PIX_FMT_YUVJ420P:
            pixFormat = AV_PIX_FMT_YUV420P;
            break;
        case AV_PIX_FMT_YUVJ422P:
            pixFormat = AV_PIX_FMT_YUV422P;
            break;
        case AV_PIX_FMT_YUVJ444P:
            pixFormat = AV_PIX_FMT_YUV444P;
            break;
        case AV_PIX_FMT_YUVJ440P:
            pixFormat = AV_PIX_FMT_YUV440P;
            break;
        default:
            pixFormat = m_codec_context->pix_fmt;
            break;
    }
    m_sws_rgb_context = sws_getContext(
        m_codec_context->width,
        m_codec_context->height,
        pixFormat,
        m_codec_context->width,
        m_codec_context->height,
        PIX_FMT_RGB24,
        SWS_AREA,
        NULL,
        NULL,
        NULL
    );
    m_spec = ImageSpec (m_codec_context->width, m_codec_context->height, 3, TypeDesc::UINT8);
    AVDictionaryEntry *tag = NULL;
    while ((tag = av_dict_get (m_format_context->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
        m_spec.attribute (tag->key, tag->value);
    }
    m_spec.attribute ("fps", m_frame_rate.num / static_cast<float> (m_frame_rate.den));
    m_spec.attribute ("oiio:Movie", true);
    m_nsubimages = m_frames;
    spec = m_spec;
    return true;
}
Example #18
void
OpenEXRInput::PartInfo::parse_header (const Imf::Header *header)
{
    if (initialized)
        return;

    ASSERT (header);
    spec = ImageSpec();

    top_datawindow = header->dataWindow();
    top_displaywindow = header->displayWindow();
    spec.x = top_datawindow.min.x;
    spec.y = top_datawindow.min.y;
    spec.z = 0;
    spec.width  = top_datawindow.max.x - top_datawindow.min.x + 1;
    spec.height = top_datawindow.max.y - top_datawindow.min.y + 1;
    spec.depth = 1;
    topwidth = spec.width;      // Save top-level mipmap dimensions
    topheight = spec.height;
    spec.full_x = top_displaywindow.min.x;
    spec.full_y = top_displaywindow.min.y;
    spec.full_z = 0;
    spec.full_width  = top_displaywindow.max.x - top_displaywindow.min.x + 1;
    spec.full_height = top_displaywindow.max.y - top_displaywindow.min.y + 1;
    spec.full_depth = 1;
    spec.tile_depth = 1;

    if (header->hasTileDescription()) {
        const Imf::TileDescription &td (header->tileDescription());
        spec.tile_width = td.xSize;
        spec.tile_height = td.ySize;
        levelmode = td.mode;
        roundingmode = td.roundingMode;
        if (levelmode == Imf::MIPMAP_LEVELS)
            nmiplevels = numlevels (std::max(topwidth,topheight), roundingmode);
        else if (levelmode == Imf::RIPMAP_LEVELS)
            nmiplevels = numlevels (std::max(topwidth,topheight), roundingmode);
        else
            nmiplevels = 1;
    } else {
        spec.tile_width = 0;
        spec.tile_height = 0;
        levelmode = Imf::ONE_LEVEL;
        nmiplevels = 1;
    }
    query_channels (header);   // also sets format

#ifdef USE_OPENEXR_VERSION2
    spec.deep = Imf::isDeepData (header->type());
#endif

    // Unless otherwise specified, exr files are assumed to be linear.
    spec.attribute ("oiio:ColorSpace", "Linear");

    if (levelmode != Imf::ONE_LEVEL)
        spec.attribute ("openexr:roundingmode", roundingmode);

    const Imf::EnvmapAttribute *envmap;
    envmap = header->findTypedAttribute<Imf::EnvmapAttribute>("envmap");
    if (envmap) {
        cubeface = (envmap->value() == Imf::ENVMAP_CUBE);
        spec.attribute ("textureformat", cubeface ? "CubeFace Environment" : "LatLong Environment");
        // OpenEXR conventions for env maps
        if (! cubeface)
            spec.attribute ("oiio:updirection", "y");
        spec.attribute ("oiio:sampleborder", 1);
        // FIXME - detect CubeFace Shadow?
    } else {
        cubeface = false;
        if (spec.tile_width && levelmode == Imf::MIPMAP_LEVELS)
            spec.attribute ("textureformat", "Plain Texture");
        // FIXME - detect Shadow
    }

    const Imf::CompressionAttribute *compressattr;
    compressattr = header->findTypedAttribute<Imf::CompressionAttribute>("compression");
    if (compressattr) {
        const char *comp = NULL;
        switch (compressattr->value()) {
        case Imf::NO_COMPRESSION    : comp = "none"; break;
        case Imf::RLE_COMPRESSION   : comp = "rle"; break;
        case Imf::ZIPS_COMPRESSION  : comp = "zips"; break;
        case Imf::ZIP_COMPRESSION   : comp = "zip"; break;
        case Imf::PIZ_COMPRESSION   : comp = "piz"; break;
        case Imf::PXR24_COMPRESSION : comp = "pxr24"; break;
#ifdef IMF_B44_COMPRESSION
            // The enum Imf::B44_COMPRESSION is not defined in older versions
            // of OpenEXR, and there are no explicit version numbers in the
            // headers.  BUT this other related #define is present only in
            // the newer version.
        case Imf::B44_COMPRESSION   : comp = "b44"; break;
        case Imf::B44A_COMPRESSION  : comp = "b44a"; break;
#endif
        default:
            break;
        }
        if (comp)
            spec.attribute ("compression", comp);
    }

    for (Imf::Header::ConstIterator hit = header->begin();
             hit != header->end();  ++hit) {
        const Imf::IntAttribute *iattr;
        const Imf::FloatAttribute *fattr;
        const Imf::StringAttribute *sattr;
        const Imf::M44fAttribute *mattr;
        const Imf::V3fAttribute *vattr;
        const Imf::V2fAttribute *v2attr;
        const char *name = hit.name();
        std::string oname = exr_tag_to_ooio_std[name];
        if (oname.empty())   // Empty string means skip this attrib
            continue;
//        if (oname == name)
//            oname = std::string(format_name()) + "_" + oname;
        const Imf::Attribute &attrib = hit.attribute();
        std::string type = attrib.typeName();
        if (type == "string" && 
            (sattr = header->findTypedAttribute<Imf::StringAttribute> (name)))
            spec.attribute (oname, sattr->value().c_str());
        else if (type == "int" && 
            (iattr = header->findTypedAttribute<Imf::IntAttribute> (name)))
            spec.attribute (oname, iattr->value());
        else if (type == "float" && 
            (fattr = header->findTypedAttribute<Imf::FloatAttribute> (name)))
            spec.attribute (oname, fattr->value());
        else if (type == "m44f" && 
            (mattr = header->findTypedAttribute<Imf::M44fAttribute> (name)))
            spec.attribute (oname, TypeDesc::TypeMatrix, &(mattr->value()));
        else if (type == "v3f" &&
                 (vattr = header->findTypedAttribute<Imf::V3fAttribute> (name)))
            spec.attribute (oname, TypeDesc::TypeVector, &(vattr->value()));
        else if (type == "v2f" &&
                 (v2attr = header->findTypedAttribute<Imf::V2fAttribute> (name))) {
            TypeDesc v2 (TypeDesc::FLOAT,TypeDesc::VEC2);
            spec.attribute (oname, v2, &(v2attr->value()));
        }
        else {
#if 0
            std::cerr << "  unknown attribute " << type << ' ' << name << "\n";
#endif
        }
    }

    initialized = true;
}
Example #19
bool
DPXInput::seek_subimage (int subimage, int miplevel, ImageSpec &newspec)
{
    if (miplevel != 0)
        return false;
    if (subimage < 0 || subimage >= m_dpx.header.ImageElementCount ())
        return false;

    m_subimage = subimage;

    // check if the client asked us for raw data
    m_wantRaw = newspec.get_int_attribute ("dpx:RawData", 0) != 0;

    // create imagespec
    TypeDesc typedesc;
    switch (m_dpx.header.ComponentDataSize(subimage)) {
        case dpx::kByte:
            typedesc = m_dpx.header.DataSign (subimage)
                ? TypeDesc::INT8 : TypeDesc::UINT8;
            break;
        case dpx::kWord:
            typedesc = m_dpx.header.DataSign (subimage)
                ? TypeDesc::INT16 : TypeDesc::UINT16;
            break;
        case dpx::kInt:
            typedesc = m_dpx.header.DataSign (subimage)
                ? TypeDesc::INT32 : TypeDesc::UINT32;
            break;
        case dpx::kFloat:
            typedesc = TypeDesc::FLOAT;
            break;
        case dpx::kDouble:
            typedesc = TypeDesc::DOUBLE;
            break;
        default:
            error ("Invalid component data size");
            return false;
    }
    m_spec = ImageSpec (m_dpx.header.Width(), m_dpx.header.Height(),
        m_dpx.header.ImageElementComponentCount(subimage), typedesc);
    // fill channel names
    m_spec.channelnames.clear ();
    switch (m_dpx.header.ImageDescriptor(subimage)) {
        /*case dpx::kUserDefinedDescriptor:
            break;*/
        case dpx::kRed:
            m_spec.channelnames.push_back("R");
            break;
        case dpx::kGreen:
            m_spec.channelnames.push_back("G");
            break;
        case dpx::kBlue:
            m_spec.channelnames.push_back("B");
            break;
        case dpx::kAlpha:
            m_spec.channelnames.push_back("A");
            m_spec.alpha_channel = 0;
            break;
        case dpx::kLuma:
            // FIXME: do we treat this as intensity or do we use Y' as per
            // convention to differentiate it from linear luminance?
            m_spec.channelnames.push_back("Y'");
            break;
        case dpx::kDepth:
            m_spec.channelnames.push_back("Z");
            m_spec.z_channel = 0;
            break;
        /*case dpx::kCompositeVideo:
            break;*/
        case dpx::kRGB:
        case dpx::kRGBA:
        case dpx::kABGR:    // colour converter will swap the bytes for us
            m_spec.default_channel_names ();
            break;
        case dpx::kCbYCrY:
            if (m_wantRaw) {
                m_spec.channelnames.push_back("CbCr");
                m_spec.channelnames.push_back("Y");
            } else {
                m_spec.nchannels = 3;
                m_spec.default_channel_names ();
            }
            break;
        case dpx::kCbYACrYA:
            if (m_wantRaw) {
                m_spec.channelnames.push_back("CbCr");
                m_spec.channelnames.push_back("Y");
                m_spec.channelnames.push_back("A");
                m_spec.alpha_channel = 2;
            } else {
                m_spec.nchannels = 4;
                m_spec.default_channel_names ();
            }
            break;
        case dpx::kCbYCr:
            if (m_wantRaw) {
                m_spec.channelnames.push_back("Cb");
                m_spec.channelnames.push_back("Y");
                m_spec.channelnames.push_back("Cr");
            } else
                m_spec.default_channel_names ();
            break;
        case dpx::kCbYCrA:
            if (m_wantRaw) {
                m_spec.channelnames.push_back("Cb");
                m_spec.channelnames.push_back("Y");
                m_spec.channelnames.push_back("Cr");
                m_spec.channelnames.push_back("A");
                m_spec.alpha_channel = 3;
            } else {
                m_spec.default_channel_names ();
            }
            break;
        default:
            {
                for (int i = 0;
                    i < m_dpx.header.ImageElementComponentCount(subimage); i++) {
                    std::string ch = "channel" + i;
                    m_spec.channelnames.push_back(ch);
                }
            }
    }
    // bits per pixel
    m_spec.attribute ("oiio:BitsPerSample", m_dpx.header.BitDepth(subimage));
    // image orientation - see appendix B.2 of the OIIO documentation
    int orientation;
    switch (m_dpx.header.ImageOrientation ()) {
        case dpx::kLeftToRightTopToBottom:
            orientation = 1;
            break;
        case dpx::kRightToLeftTopToBottom:
            orientation = 2;
            break;
        case dpx::kLeftToRightBottomToTop:
            orientation = 4;
            break;
        case dpx::kRightToLeftBottomToTop:
            orientation = 3;
            break;
        case dpx::kTopToBottomLeftToRight:
            orientation = 5;
            break;
        case dpx::kTopToBottomRightToLeft:
            orientation = 6;
            break;
        case dpx::kBottomToTopLeftToRight:
            orientation = 8;
            break;
        case dpx::kBottomToTopRightToLeft:
            orientation = 7;
            break;
        default:
            orientation = 0;
            break;
    }
    m_spec.attribute ("Orientation", orientation);

    // image linearity
    switch (m_dpx.header.Transfer (subimage)) {
        case dpx::kLinear:
            m_spec.attribute ("oiio:ColorSpace", "Linear");
            break;
        case dpx::kLogarithmic:
            m_spec.attribute ("oiio:ColorSpace", "KodakLog");
            break;
        case dpx::kITUR709:
            m_spec.attribute ("oiio:ColorSpace", "Rec709");
            break;
        case dpx::kUserDefined:
            if (! isnan (m_dpx.header.Gamma ()) && m_dpx.header.Gamma () != 0) {
                m_spec.attribute ("oiio:ColorSpace", "GammaCorrected");
                m_spec.attribute ("oiio:Gamma", (float) m_dpx.header.Gamma ());
                break;
            }
            // intentional fall-through
        /*case dpx::kPrintingDensity:
        case dpx::kUnspecifiedVideo:
        case dpx::kSMPTE274M:
        case dpx::kITUR601:
        case dpx::kITUR602:
        case dpx::kNTSCCompositeVideo:
        case dpx::kPALCompositeVideo:
        case dpx::kZLinear:
        case dpx::kZHomogeneous:
        case dpx::kUndefinedCharacteristic:*/
        default:
            break;
    }
    m_spec.attribute ("dpx:Transfer",
        get_characteristic_string (m_dpx.header.Transfer (subimage)));
    // colorimetric characteristic
    m_spec.attribute ("dpx:Colorimetric",
        get_characteristic_string (m_dpx.header.Colorimetric (subimage)));

    // general metadata
    // some non-compliant writers will dump a field filled with 0xFF rather
    // than a NULL string termination on the first character, so take that
    // into account, too
    if (m_dpx.header.copyright[0] && m_dpx.header.copyright[0] != (char)0xFF)
        m_spec.attribute ("Copyright", m_dpx.header.copyright);
    if (m_dpx.header.creator[0] && m_dpx.header.creator[0] != (char)0xFF)
        m_spec.attribute ("Software", m_dpx.header.creator);
    if (m_dpx.header.project[0] && m_dpx.header.project[0] != (char)0xFF)
        m_spec.attribute ("DocumentName", m_dpx.header.project);
    if (m_dpx.header.creationTimeDate[0]) {
        // libdpx's date/time format is pretty close to OIIO's (libdpx uses
        // %Y:%m:%d:%H:%M:%S%Z)
        char date[24];
        strcpy(date, m_dpx.header.creationTimeDate);
        date[10] = ' ';
        date[19] = 0;
        m_spec.attribute ("DateTime", date);
    }
    if (m_dpx.header.ImageEncoding (subimage) == dpx::kRLE)
        m_spec.attribute ("compression", "rle");
    char buf[32 + 1];
    m_dpx.header.Description (subimage, buf);
    if (buf[0] && buf[0] != -1)
        m_spec.attribute ("ImageDescription", buf);
    m_spec.attribute ("PixelAspectRatio", m_dpx.header.AspectRatio(0)
         / (float)m_dpx.header.AspectRatio(1));

    // DPX-specific metadata
    m_spec.attribute ("dpx:ImageDescriptor",
        get_descriptor_string (m_dpx.header.ImageDescriptor (subimage)));
    // save some typing by using macros
    // "internal" macros
#define DPX_SET_ATTRIB_S(x, n, s)   m_spec.attribute (s,                      \
                                        m_dpx.header.x (n))
#define DPX_SET_ATTRIB(x, n)        DPX_SET_ATTRIB_S(x, n, "dpx:" #x)
    // set without checking for bogus attributes
#define DPX_SET_ATTRIB_N(x)         DPX_SET_ATTRIB(x, subimage)
    // set with checking for bogus attributes
#define DPX_SET_ATTRIB_BYTE(x)      if (m_dpx.header.x () != 0xFF)      \
                                        DPX_SET_ATTRIB(x, )
#define DPX_SET_ATTRIB_INT_N(x)     if (m_dpx.header.x (subimage) != 0xFFFFFFFF) \
                                        DPX_SET_ATTRIB(x, subimage)
#define DPX_SET_ATTRIB_INT(x)       if (m_dpx.header.x () != 0xFFFFFFFF)      \
                                        DPX_SET_ATTRIB(x, )
#define DPX_SET_ATTRIB_FLOAT_N(x)   if (! isnan(m_dpx.header.x (subimage)))      \
                                        DPX_SET_ATTRIB(x, subimage)
#define DPX_SET_ATTRIB_FLOAT(x)     if (! isnan(m_dpx.header.x ()))           \
                                        DPX_SET_ATTRIB(x, )
    // see comment above Copyright, Software and DocumentName
#define DPX_SET_ATTRIB_STR(X, x)    if (m_dpx.header.x[0]                     \
                                        && m_dpx.header.x[0] != -1)           \
                                        m_spec.attribute ("dpx:" #X,          \
                                            m_dpx.header.x)

    DPX_SET_ATTRIB_INT(EncryptKey);
    DPX_SET_ATTRIB_INT(DittoKey);
    DPX_SET_ATTRIB_INT_N(LowData);
    DPX_SET_ATTRIB_FLOAT_N(LowQuantity);
    DPX_SET_ATTRIB_INT_N(HighData);
    DPX_SET_ATTRIB_FLOAT_N(HighQuantity);
    DPX_SET_ATTRIB_FLOAT(XScannedSize);
    DPX_SET_ATTRIB_FLOAT(YScannedSize);
    DPX_SET_ATTRIB_INT(FramePosition);
    DPX_SET_ATTRIB_INT(SequenceLength);
    DPX_SET_ATTRIB_INT(HeldCount);
    DPX_SET_ATTRIB_FLOAT(FrameRate);
    DPX_SET_ATTRIB_FLOAT(ShutterAngle);
    DPX_SET_ATTRIB_STR(Version, version);
    DPX_SET_ATTRIB_STR(Format, format);
    DPX_SET_ATTRIB_STR(FrameId, frameId);
    DPX_SET_ATTRIB_STR(SlateInfo, slateInfo);
    DPX_SET_ATTRIB_STR(SourceImageFileName, sourceImageFileName);
    DPX_SET_ATTRIB_STR(InputDevice, inputDevice);
    DPX_SET_ATTRIB_STR(InputDeviceSerialNumber, inputDeviceSerialNumber);
    DPX_SET_ATTRIB_BYTE(Interlace);
    DPX_SET_ATTRIB_BYTE(FieldNumber);
    DPX_SET_ATTRIB_FLOAT(HorizontalSampleRate);
    DPX_SET_ATTRIB_FLOAT(VerticalSampleRate);
    DPX_SET_ATTRIB_FLOAT(TemporalFrameRate);
    DPX_SET_ATTRIB_FLOAT(TimeOffset);
    DPX_SET_ATTRIB_FLOAT(BlackLevel);
    DPX_SET_ATTRIB_FLOAT(BlackGain);
    DPX_SET_ATTRIB_FLOAT(BreakPoint);
    DPX_SET_ATTRIB_FLOAT(WhiteLevel);
    DPX_SET_ATTRIB_FLOAT(IntegrationTimes);

#undef DPX_SET_ATTRIB_STR
#undef DPX_SET_ATTRIB_FLOAT
#undef DPX_SET_ATTRIB_FLOAT_N
#undef DPX_SET_ATTRIB_INT
#undef DPX_SET_ATTRIB_INT_N
#undef DPX_SET_ATTRIB_N
#undef DPX_SET_ATTRIB
#undef DPX_SET_ATTRIB_S

    std::string tmpstr;
    switch (m_dpx.header.ImagePacking (subimage)) {
        case dpx::kPacked:
            tmpstr = "Packed";
            break;
        case dpx::kFilledMethodA:
            tmpstr = "Filled, method A";
            break;
        case dpx::kFilledMethodB:
            tmpstr = "Filled, method B";
            break;
    }
    if (!tmpstr.empty ())
        m_spec.attribute ("dpx:Packing", tmpstr);

    if (m_dpx.header.timeCode != 0xFFFFFFFF)
        m_spec.attribute ("dpx:TimeCode", m_dpx.header.timeCode);
    if (m_dpx.header.userBits != 0xFFFFFFFF)
        m_spec.attribute ("dpx:UserBits", m_dpx.header.userBits);
    if (m_dpx.header.sourceTimeDate[0]) {
        // libdpx's date/time format is pretty close to OIIO's (libdpx uses
        // %Y:%m:%d:%H:%M:%S%Z)
        char date[24];
        strcpy(date, m_dpx.header.sourceTimeDate);
        date[10] = ' ';
        date[19] = 0;
        m_spec.attribute ("dpx:SourceDateTime", date);
    }
    m_dpx.header.FilmEdgeCode(buf);
    if (buf[0])
        m_spec.attribute ("dpx:FilmEdgeCode", buf);

    tmpstr.clear ();
    switch (m_dpx.header.Signal ()) {
        case dpx::kUndefined:
            tmpstr = "Undefined";
            break;
        case dpx::kNTSC:
            tmpstr = "NTSC";
            break;
        case dpx::kPAL:
            tmpstr = "PAL";
            break;
        case dpx::kPAL_M:
            tmpstr = "PAL-M";
            break;
        case dpx::kSECAM:
            tmpstr = "SECAM";
            break;
        case dpx::k525LineInterlace43AR:
            tmpstr = "YCbCr ITU-R 601-5 525i, 4:3";
            break;
        case dpx::k625LineInterlace43AR:
            tmpstr = "YCbCr ITU-R 601-5 625i, 4:3";
            break;
        case dpx::k525LineInterlace169AR:
            tmpstr = "YCbCr ITU-R 601-5 525i, 16:9";
            break;
        case dpx::k625LineInterlace169AR:
            tmpstr = "YCbCr ITU-R 601-5 625i, 16:9";
            break;
        case dpx::k1050LineInterlace169AR:
            tmpstr = "YCbCr 1050i, 16:9";
            break;
        case dpx::k1125LineInterlace169AR_274:
            tmpstr = "YCbCr 1125i, 16:9 (SMPTE 274M)";
            break;
        case dpx::k1250LineInterlace169AR:
            tmpstr = "YCbCr 1250i, 16:9";
            break;
        case dpx::k1125LineInterlace169AR_240:
            tmpstr = "YCbCr 1125i, 16:9 (SMPTE 240M)";
            break;
        case dpx::k525LineProgressive169AR:
            tmpstr = "YCbCr 525p, 16:9";
            break;
        case dpx::k625LineProgressive169AR:
            tmpstr = "YCbCr 625p, 16:9";
            break;
        case dpx::k750LineProgressive169AR:
            tmpstr = "YCbCr 750p, 16:9 (SMPTE 296M)";
            break;
        case dpx::k1125LineProgressive169AR:
            tmpstr = "YCbCr 1125p, 16:9 (SMPTE 274M)";
            break;
        case 0xFF:
            // don't set the attribute at all
            break;
        default:
            tmpstr = Strutil::format ("Undefined %d",
                (int)m_dpx.header.Signal ());
            break;
    }
    if (!tmpstr.empty ())
        m_spec.attribute ("dpx:Signal", tmpstr);

    // read in user data; don't bother if the buffer is already filled (user
    // data is per-file, not per-element)
    if (m_userBuf.empty () && m_dpx.header.UserSize () != 0
        && m_dpx.header.UserSize () != 0xFFFFFFFF) {
        m_userBuf.resize (m_dpx.header.UserSize ());
        m_dpx.ReadUserData (&m_userBuf[0]);
    }
    if (!m_userBuf.empty ())
        m_spec.attribute ("dpx:UserData", TypeDesc (TypeDesc::UCHAR,
            m_dpx.header.UserSize ()), &m_userBuf[0]);

    dpx::Block block(0, 0, m_dpx.header.Width () - 1, 0);
    int bufsize = dpx::QueryRGBBufferSize (m_dpx.header, subimage, block);
    if (bufsize == 0 && !m_wantRaw) {
        error ("Unable to deliver RGB data from source data");
        return false;
    } else if (!m_wantRaw && bufsize > 0)
        m_dataPtr = new unsigned char[bufsize];
    else
        // no need to allocate another buffer
        m_dataPtr = NULL;
    
    newspec = m_spec;
    return true;
}
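
A minimal client-side sketch of consuming the DPX attributes set above through OIIO's public ImageInput interface. The helper name and the attribute picks are illustrative only, and it assumes the raw-pointer (OIIO 1.x) API used throughout these examples.

#include <iostream>
#include <string>
#include <OpenImageIO/imageio.h>
OIIO_NAMESPACE_USING

bool dump_dpx_metadata (const std::string &filename)   // hypothetical helper
{
    ImageInput *in = ImageInput::open (filename);   // picks the DPX reader for .dpx files
    if (! in)
        return false;
    const ImageSpec &spec = in->spec ();
    // Attributes populated by the reader above; defaults are returned if absent.
    std::cout << "frame rate:   "
              << spec.get_float_attribute ("dpx:FrameRate", 0.0f) << "\n"
              << "pixel aspect: "
              << spec.get_float_attribute ("PixelAspectRatio", 1.0f) << "\n";
    in->close ();
    delete in;   // caller owns the pointer in the 1.x API
    return true;
}
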
Example #20
void
OpenEXRInput::PartInfo::parse_header (const Imf::Header *header)
{
    if (initialized)
        return;

    ASSERT (header);
    spec = ImageSpec();

    top_datawindow = header->dataWindow();
    top_displaywindow = header->displayWindow();
    spec.x = top_datawindow.min.x;
    spec.y = top_datawindow.min.y;
    spec.z = 0;
    spec.width  = top_datawindow.max.x - top_datawindow.min.x + 1;
    spec.height = top_datawindow.max.y - top_datawindow.min.y + 1;
    spec.depth = 1;
    topwidth = spec.width;      // Save top-level mipmap dimensions
    topheight = spec.height;
    spec.full_x = top_displaywindow.min.x;
    spec.full_y = top_displaywindow.min.y;
    spec.full_z = 0;
    spec.full_width  = top_displaywindow.max.x - top_displaywindow.min.x + 1;
    spec.full_height = top_displaywindow.max.y - top_displaywindow.min.y + 1;
    spec.full_depth = 1;
    spec.tile_depth = 1;

    if (header->hasTileDescription()
          && Strutil::icontains(header->type(), "tile")) {
        const Imf::TileDescription &td (header->tileDescription());
        spec.tile_width = td.xSize;
        spec.tile_height = td.ySize;
        levelmode = td.mode;
        roundingmode = td.roundingMode;
        if (levelmode == Imf::MIPMAP_LEVELS)
            nmiplevels = numlevels (std::max(topwidth,topheight), roundingmode);
        else if (levelmode == Imf::RIPMAP_LEVELS)
            nmiplevels = numlevels (std::max(topwidth,topheight), roundingmode);
        else
            nmiplevels = 1;
    } else {
        spec.tile_width = 0;
        spec.tile_height = 0;
        levelmode = Imf::ONE_LEVEL;
        nmiplevels = 1;
    }
    query_channels (header);   // also sets format

    spec.deep = Strutil::istarts_with (header->type(), "deep");

    // Unless otherwise specified, exr files are assumed to be linear.
    spec.attribute ("oiio:ColorSpace", "Linear");

    if (levelmode != Imf::ONE_LEVEL)
        spec.attribute ("openexr:roundingmode", roundingmode);

    const Imf::EnvmapAttribute *envmap;
    envmap = header->findTypedAttribute<Imf::EnvmapAttribute>("envmap");
    if (envmap) {
        cubeface = (envmap->value() == Imf::ENVMAP_CUBE);
        spec.attribute ("textureformat", cubeface ? "CubeFace Environment" : "LatLong Environment");
        // OpenEXR conventions for env maps
        if (! cubeface)
            spec.attribute ("oiio:updirection", "y");
        spec.attribute ("oiio:sampleborder", 1);
        // FIXME - detect CubeFace Shadow?
    } else {
        cubeface = false;
        if (spec.tile_width && levelmode == Imf::MIPMAP_LEVELS)
            spec.attribute ("textureformat", "Plain Texture");
        // FIXME - detect Shadow
    }

    const Imf::CompressionAttribute *compressattr;
    compressattr = header->findTypedAttribute<Imf::CompressionAttribute>("compression");
    if (compressattr) {
        const char *comp = NULL;
        switch (compressattr->value()) {
        case Imf::NO_COMPRESSION    : comp = "none"; break;
        case Imf::RLE_COMPRESSION   : comp = "rle"; break;
        case Imf::ZIPS_COMPRESSION  : comp = "zips"; break;
        case Imf::ZIP_COMPRESSION   : comp = "zip"; break;
        case Imf::PIZ_COMPRESSION   : comp = "piz"; break;
        case Imf::PXR24_COMPRESSION : comp = "pxr24"; break;
#ifdef IMF_B44_COMPRESSION
            // The enum Imf::B44_COMPRESSION is not defined in older versions
            // of OpenEXR, and there are no explicit version numbers in the
            // headers.  BUT this other related #define is present only in
            // the newer version.
        case Imf::B44_COMPRESSION   : comp = "b44"; break;
        case Imf::B44A_COMPRESSION  : comp = "b44a"; break;
#endif
#if defined(OPENEXR_VERSION_MAJOR) && \
    (OPENEXR_VERSION_MAJOR*10000+OPENEXR_VERSION_MINOR*100+OPENEXR_VERSION_PATCH) >= 20200
        case Imf::DWAA_COMPRESSION  : comp = "dwaa"; break;
        case Imf::DWAB_COMPRESSION  : comp = "dwab"; break;
#endif
        default:
            break;
        }
        if (comp)
            spec.attribute ("compression", comp);
    }

    for (Imf::Header::ConstIterator hit = header->begin();
             hit != header->end();  ++hit) {
        const Imf::IntAttribute *iattr;
        const Imf::FloatAttribute *fattr;
        const Imf::StringAttribute *sattr;
        const Imf::M33fAttribute *m33fattr;
        const Imf::M44fAttribute *m44fattr;
        const Imf::V3fAttribute *v3fattr;
        const Imf::V3iAttribute *v3iattr;
        const Imf::V2fAttribute *v2fattr;
        const Imf::V2iAttribute *v2iattr;
        const Imf::Box2iAttribute *b2iattr;
        const Imf::Box2fAttribute *b2fattr;
        const Imf::TimeCodeAttribute *tattr;
        const Imf::KeyCodeAttribute *kcattr;
        const Imf::ChromaticitiesAttribute *crattr;
        const Imf::RationalAttribute *rattr;
        const Imf::StringVectorAttribute *svattr;
        const Imf::DoubleAttribute *dattr;
        const Imf::V2dAttribute *v2dattr;
        const Imf::V3dAttribute *v3dattr;
        const Imf::M33dAttribute *m33dattr;
        const Imf::M44dAttribute *m44dattr;
        const char *name = hit.name();
        std::string oname = exr_tag_to_oiio_std[name];
        if (oname.empty())   // Empty string means skip this attrib
            continue;
//        if (oname == name)
//            oname = std::string(format_name()) + "_" + oname;
        const Imf::Attribute &attrib = hit.attribute();
        std::string type = attrib.typeName();
        if (type == "string" &&
            (sattr = header->findTypedAttribute<Imf::StringAttribute> (name)))
            spec.attribute (oname, sattr->value().c_str());
        else if (type == "int" &&
            (iattr = header->findTypedAttribute<Imf::IntAttribute> (name)))
            spec.attribute (oname, iattr->value());
        else if (type == "float" &&
            (fattr = header->findTypedAttribute<Imf::FloatAttribute> (name)))
            spec.attribute (oname, fattr->value());
        else if (type == "m33f" &&
            (m33fattr = header->findTypedAttribute<Imf::M33fAttribute> (name)))
            spec.attribute (oname, TypeMatrix33, &(m33fattr->value()));
        else if (type == "m44f" &&
            (m44fattr = header->findTypedAttribute<Imf::M44fAttribute> (name)))
            spec.attribute (oname, TypeMatrix44, &(m44fattr->value()));
        else if (type == "v3f" &&
                 (v3fattr = header->findTypedAttribute<Imf::V3fAttribute> (name)))
            spec.attribute (oname, TypeVector, &(v3fattr->value()));
        else if (type == "v3i" &&
                 (v3iattr = header->findTypedAttribute<Imf::V3iAttribute> (name))) {
            TypeDesc v3 (TypeDesc::INT, TypeDesc::VEC3, TypeDesc::VECTOR);
            spec.attribute (oname, v3, &(v3iattr->value()));
        }
        else if (type == "v2f" &&
                 (v2fattr = header->findTypedAttribute<Imf::V2fAttribute> (name))) {
            TypeDesc v2 (TypeDesc::FLOAT,TypeDesc::VEC2);
            spec.attribute (oname, v2, &(v2fattr->value()));
        }
        else if (type == "v2i" &&
                 (v2iattr = header->findTypedAttribute<Imf::V2iAttribute> (name))) {
            TypeDesc v2 (TypeDesc::INT,TypeDesc::VEC2);
            spec.attribute (oname, v2, &(v2iattr->value()));
        }
        else if (type == "stringvector" &&
            (svattr = header->findTypedAttribute<Imf::StringVectorAttribute> (name))) {
            std::vector<std::string> strvec = svattr->value();
            std::vector<ustring> ustrvec (strvec.size());
            for (size_t i = 0, e = strvec.size();  i < e;  ++i)
                ustrvec[i] = strvec[i];
            TypeDesc sv (TypeDesc::STRING, ustrvec.size());
            spec.attribute(oname, sv, &ustrvec[0]);
        }
        else if (type == "double" &&
            (dattr = header->findTypedAttribute<Imf::DoubleAttribute> (name))) {
            TypeDesc d (TypeDesc::DOUBLE);
            spec.attribute (oname, d, &(dattr->value()));
        }
        else if (type == "v2d" &&
                 (v2dattr = header->findTypedAttribute<Imf::V2dAttribute> (name))) {
            TypeDesc v2 (TypeDesc::DOUBLE,TypeDesc::VEC2);
            spec.attribute (oname, v2, &(v2dattr->value()));
        }
        else if (type == "v3d" &&
                 (v3dattr = header->findTypedAttribute<Imf::V3dAttribute> (name))) {
            TypeDesc v3 (TypeDesc::DOUBLE,TypeDesc::VEC3, TypeDesc::VECTOR);
            spec.attribute (oname, v3, &(v3dattr->value()));
        }
        else if (type == "m33d" &&
            (m33dattr = header->findTypedAttribute<Imf::M33dAttribute> (name))) {
            TypeDesc m33 (TypeDesc::DOUBLE, TypeDesc::MATRIX33);
            spec.attribute (oname, m33, &(m33dattr->value()));
        }
        else if (type == "m44d" &&
            (m44dattr = header->findTypedAttribute<Imf::M44dAttribute> (name))) {
            TypeDesc m44 (TypeDesc::DOUBLE, TypeDesc::MATRIX44);
            spec.attribute (oname, m44, &(m44dattr->value()));
        }
        else if (type == "box2i" &&
                 (b2iattr = header->findTypedAttribute<Imf::Box2iAttribute> (name))) {
            TypeDesc bx (TypeDesc::INT, TypeDesc::VEC2, 2);
            spec.attribute (oname, bx, &b2iattr->value());
        }
        else if (type == "box2f" &&
                 (b2fattr = header->findTypedAttribute<Imf::Box2fAttribute> (name))) {
            TypeDesc bx (TypeDesc::FLOAT, TypeDesc::VEC2, 2);
            spec.attribute (oname, bx, &b2fattr->value());
        }
        else if (type == "timecode" &&
                 (tattr = header->findTypedAttribute<Imf::TimeCodeAttribute> (name))) {
            unsigned int timecode[2];
            timecode[0] = tattr->value().timeAndFlags(Imf::TimeCode::TV60_PACKING); //TV60 returns unchanged _time
            timecode[1] = tattr->value().userData();

            // Elevate "timeCode" to smpte:TimeCode
            if (oname == "timeCode")
                oname = "smpte:TimeCode";
            spec.attribute(oname, TypeTimeCode, timecode);
        }
        else if (type == "keycode" &&
                 (kcattr = header->findTypedAttribute<Imf::KeyCodeAttribute> (name))) {
            const Imf::KeyCode *k = &kcattr->value();
            unsigned int keycode[7];
            keycode[0] = k->filmMfcCode();
            keycode[1] = k->filmType();
            keycode[2] = k->prefix();
            keycode[3] = k->count();
            keycode[4] = k->perfOffset();
            keycode[5] = k->perfsPerFrame();
            keycode[6] = k->perfsPerCount();

            // Elevate "keyCode" to smpte:KeyCode
            if (oname == "keyCode")
                oname = "smpte:KeyCode";
            spec.attribute(oname, TypeKeyCode, keycode);
        } else if (type == "chromaticities" &&
                   (crattr = header->findTypedAttribute<Imf::ChromaticitiesAttribute> (name))) {
            const Imf::Chromaticities *chroma = &crattr->value();
            spec.attribute (oname, TypeDesc(TypeDesc::FLOAT,8),
                            (const float *)chroma);
        } 
        else if (type == "rational" &&
                   (rattr = header->findTypedAttribute<Imf::RationalAttribute> (name))) {
            const Imf::Rational *rational = &rattr->value();
            int n = rational->n;
            unsigned int d = rational->d;
            if (d < (1UL << 31)) {
                int r[2];
                r[0] = n;
                r[1] = static_cast<int>(d);
                spec.attribute (oname, TypeRational, r);    
            } else {
                // Reduce by the greatest common divisor of n and d and retry;
                // note this needs the integer values, not the Rational object.
                int f = static_cast<int>(boost::math::gcd<long int>(n, d));
                if (f > 1) {
                    int r[2];
                    r[0] = n / f;
                    r[1] = static_cast<int>(d / f);
                    spec.attribute (oname, TypeRational, r);
                } else {
                    // TODO: find a way to allow the client to accept "close" rational values
                    OIIO::debug ("Don't know what to do with OpenEXR Rational attribute %s with value %d / %u that we cannot represent exactly", oname, n, d);
                }
            }
        }
        else {
#if 0
            std::cerr << "  unknown attribute " << type << ' ' << name << "\n";
#endif
        }
    }

    float aspect = spec.get_float_attribute ("PixelAspectRatio", 0.0f);
    float xdensity = spec.get_float_attribute ("XResolution", 0.0f);
    if (xdensity) {
        // If XResolution is found, supply the YResolution and unit.
        spec.attribute ("YResolution", xdensity * (aspect ? aspect : 1.0f));
        spec.attribute ("ResolutionUnit", "in"); // EXR is always pixels/inch
    }

    // EXR "name" also gets passed along as "oiio:subimagename".
    if (header->hasName())
        spec.attribute ("oiio:subimagename", header->name());

    // Squash some problematic texture metadata if we suspect it's wrong
    pvt::check_texture_metadata_sanity (spec);

    initialized = true;
}
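
To make the window bookkeeping above concrete, here is the same arithmetic for an illustrative file (the numbers are made up): a dataWindow of (-16,-16)..(1007,1007) inside a displayWindow of (0,0)..(1023,1023).

// dataWindow -> pixel data region, displayWindow -> "full" (display) image:
//   spec.x          = -16                        // dataWindow.min.x
//   spec.y          = -16                        // dataWindow.min.y
//   spec.width      = 1007 - (-16) + 1 = 1024    // max.x - min.x + 1
//   spec.height     = 1007 - (-16) + 1 = 1024
//   spec.full_x     = 0                          // displayWindow.min.x
//   spec.full_width = 1023 - 0 + 1     = 1024    // max.x - min.x + 1
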
Example #21
void
TIFFInput::readspec (bool read_meta)
{
    uint32 width = 0, height = 0, depth = 0;
    unsigned short nchans = 1;
    TIFFGetField (m_tif, TIFFTAG_IMAGEWIDTH, &width);
    TIFFGetField (m_tif, TIFFTAG_IMAGELENGTH, &height);
    TIFFGetFieldDefaulted (m_tif, TIFFTAG_IMAGEDEPTH, &depth);
    TIFFGetFieldDefaulted (m_tif, TIFFTAG_SAMPLESPERPIXEL, &nchans);

    if (read_meta) {
        // clear the whole m_spec and start fresh
        m_spec = ImageSpec ((int)width, (int)height, (int)nchans);
    } else {
        // assume m_spec is valid, except for things that might differ
        // between MIP levels
        m_spec.width = (int)width;
        m_spec.height = (int)height;
        m_spec.depth = (int)depth;
        m_spec.full_x = 0;
        m_spec.full_y = 0;
        m_spec.full_z = 0;
        m_spec.full_width = (int)width;
        m_spec.full_height = (int)height;
        m_spec.full_depth = (int)depth;
        m_spec.nchannels = (int)nchans;
    }

    float x = 0, y = 0;
    TIFFGetField (m_tif, TIFFTAG_XPOSITION, &x);
    TIFFGetField (m_tif, TIFFTAG_YPOSITION, &y);
    m_spec.x = (int)x;
    m_spec.y = (int)y;
    m_spec.z = 0;
    // FIXME? - TIFF spec describes the positions as in resolutionunit.
    // What happens if this is not unitless pixels?  Are we interpreting
    // it all wrong?

    if (TIFFGetField (m_tif, TIFFTAG_PIXAR_IMAGEFULLWIDTH, &width) == 1
          && width > 0)
        m_spec.full_width = width;
    if (TIFFGetField (m_tif, TIFFTAG_PIXAR_IMAGEFULLLENGTH, &height) == 1
          && height > 0)
        m_spec.full_height = height;

    if (TIFFIsTiled (m_tif)) {
        TIFFGetField (m_tif, TIFFTAG_TILEWIDTH, &m_spec.tile_width);
        TIFFGetField (m_tif, TIFFTAG_TILELENGTH, &m_spec.tile_height);
        TIFFGetFieldDefaulted (m_tif, TIFFTAG_TILEDEPTH, &m_spec.tile_depth);
    } else {
        m_spec.tile_width = 0;
        m_spec.tile_height = 0;
        m_spec.tile_depth = 0;
    }

    m_bitspersample = 8;
    TIFFGetField (m_tif, TIFFTAG_BITSPERSAMPLE, &m_bitspersample);
    m_spec.attribute ("oiio:BitsPerSample", (int)m_bitspersample);

    unsigned short sampleformat = SAMPLEFORMAT_UINT;
    TIFFGetFieldDefaulted (m_tif, TIFFTAG_SAMPLEFORMAT, &sampleformat);
    switch (m_bitspersample) {
    case 1:
    case 2:
    case 4:
    case 6:
        // Make 1, 2, 4, 6 bpp look like byte images
    case 8:
        if (sampleformat == SAMPLEFORMAT_UINT)
            m_spec.set_format (TypeDesc::UINT8);
        else if (sampleformat == SAMPLEFORMAT_INT)
            m_spec.set_format (TypeDesc::INT8);
        else m_spec.set_format (TypeDesc::UINT8);  // punt
        break;
    case 10:
    case 12:
    case 14:
        // Make 10, 12, 14 bpp look like 16 bit images
    case 16:
        if (sampleformat == SAMPLEFORMAT_UINT)
            m_spec.set_format (TypeDesc::UINT16);
        else if (sampleformat == SAMPLEFORMAT_INT)
            m_spec.set_format (TypeDesc::INT16);
        else if (sampleformat == SAMPLEFORMAT_IEEEFP)
            m_spec.set_format (TypeDesc::HALF); // not to spec, but why not?
        else
            m_spec.set_format (TypeDesc::UNKNOWN);
        break;
    case 32:
        if (sampleformat == SAMPLEFORMAT_IEEEFP)
            m_spec.set_format (TypeDesc::FLOAT);
        else if (sampleformat == SAMPLEFORMAT_UINT)
            m_spec.set_format (TypeDesc::UINT32);
        else if (sampleformat == SAMPLEFORMAT_INT)
            m_spec.set_format (TypeDesc::INT32);
        else
            m_spec.set_format (TypeDesc::UNKNOWN);
        break;
    case 64:
        if (sampleformat == SAMPLEFORMAT_IEEEFP)
            m_spec.set_format (TypeDesc::DOUBLE);
        else
            m_spec.set_format (TypeDesc::UNKNOWN);
        break;
    default:
        m_spec.set_format (TypeDesc::UNKNOWN);
        break;
    }

    // If we've been instructed to skip reading metadata, because it is
    // guaranteed to be identical to what we already have in m_spec,
    // skip everything following.
    if (! read_meta)
        return;

    // Use the table for all the obvious things that can be mindlessly
    // shoved into the image spec.
    for (int i = 0;  tiff_tag_table[i].name;  ++i)
        find_tag (tiff_tag_table[i].tifftag,
                  tiff_tag_table[i].tifftype, tiff_tag_table[i].name);

    // Now we need to get fields "by hand" for anything else that is less
    // straightforward...

    m_photometric = (m_spec.nchannels == 1 ? PHOTOMETRIC_MINISBLACK : PHOTOMETRIC_RGB);
    TIFFGetField (m_tif, TIFFTAG_PHOTOMETRIC, &m_photometric);
    m_spec.attribute ("tiff:PhotometricInterpretation", (int)m_photometric);
    if (m_photometric == PHOTOMETRIC_PALETTE) {
        // Read the color map
        unsigned short *r = NULL, *g = NULL, *b = NULL;
        TIFFGetField (m_tif, TIFFTAG_COLORMAP, &r, &g, &b);
        ASSERT (r != NULL && g != NULL && b != NULL);
        m_colormap.clear ();
        m_colormap.insert (m_colormap.end(), r, r + (1 << m_bitspersample));
        m_colormap.insert (m_colormap.end(), g, g + (1 << m_bitspersample));
        m_colormap.insert (m_colormap.end(), b, b + (1 << m_bitspersample));
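        // Layout of m_colormap after the three inserts: three consecutive
        // blocks of (1 << m_bitspersample) 16-bit entries, so palette index i
        // maps to
        //     r = m_colormap[i]
        //     g = m_colormap[(1 << m_bitspersample) + i]
        //     b = m_colormap[2 * (1 << m_bitspersample) + i]
        // (values are 16-bit, as stored in the TIFF ColorMap tag).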
        // Palette TIFF images are always 3 channels (to the client)
        m_spec.nchannels = 3;
        m_spec.default_channel_names ();
        // FIXME - what about palette + extra (alpha?) channels?  Is that
        // allowed?  And if so, ever encountered in the wild?
    }

    TIFFGetFieldDefaulted (m_tif, TIFFTAG_PLANARCONFIG, &m_planarconfig);
    m_separate = (m_planarconfig == PLANARCONFIG_SEPARATE &&
                  m_spec.nchannels > 1 &&
                  m_photometric != PHOTOMETRIC_PALETTE);
    m_spec.attribute ("tiff:PlanarConfiguration", (int)m_planarconfig);
    if (m_planarconfig == PLANARCONFIG_SEPARATE)
        m_spec.attribute ("planarconfig", "separate");
    else
        m_spec.attribute ("planarconfig", "contig");

    int compress = 0;
    TIFFGetFieldDefaulted (m_tif, TIFFTAG_COMPRESSION, &compress);
    m_spec.attribute ("tiff:Compression", compress);
    switch (compress) {
    case COMPRESSION_NONE :
        m_spec.attribute ("compression", "none");
        break;
    case COMPRESSION_LZW :
        m_spec.attribute ("compression", "lzw");
        break;
    case COMPRESSION_CCITTRLE :
        m_spec.attribute ("compression", "ccittrle");
        break;
    case COMPRESSION_DEFLATE :
    case COMPRESSION_ADOBE_DEFLATE :
        m_spec.attribute ("compression", "zip");
        break;
    case COMPRESSION_PACKBITS :
        m_spec.attribute ("compression", "packbits");
        break;
    default:
        break;
    }

    int rowsperstrip = -1;
    if (! m_spec.tile_width) {
        TIFFGetField (m_tif, TIFFTAG_ROWSPERSTRIP, &rowsperstrip);
        if (rowsperstrip > 0)
            m_spec.attribute ("tiff:RowsPerStrip", rowsperstrip);
    }

    // The libtiff docs say that only uncompressed images, or those with
    // rowsperstrip==1, support random access to scanlines.
    m_no_random_access = (compress != COMPRESSION_NONE && rowsperstrip != 1);

    short resunit = -1;
    TIFFGetField (m_tif, TIFFTAG_RESOLUTIONUNIT, &resunit);
    switch (resunit) {
    case RESUNIT_NONE : m_spec.attribute ("ResolutionUnit", "none"); break;
    case RESUNIT_INCH : m_spec.attribute ("ResolutionUnit", "in"); break;
    case RESUNIT_CENTIMETER : m_spec.attribute ("ResolutionUnit", "cm"); break;
    }

    get_matrix_attribute ("worldtocamera", TIFFTAG_PIXAR_MATRIX_WORLDTOCAMERA);
    get_matrix_attribute ("worldtoscreen", TIFFTAG_PIXAR_MATRIX_WORLDTOSCREEN);
    get_int_attribute ("tiff:subfiletype", TIFFTAG_SUBFILETYPE);
    // FIXME -- should subfiletype be "conventionized" and used for all
    // plugins uniformly? 

    // Do we care about fillorder?  No, the TIFF spec says, "We
    // recommend that FillOrder=2 (lsb-to-msb) be used only in
    // special-purpose applications".  So OIIO will assume msb-to-lsb
    // convention until somebody finds a TIFF file in the wild that
    // breaks this assumption.

    // Special names for shadow maps
    char *s = NULL;
    TIFFGetField (m_tif, TIFFTAG_PIXAR_TEXTUREFORMAT, &s);
    if (s)
        m_emulate_mipmap = true;
    if (s && ! strcmp (s, "Shadow")) {
        for (int c = 0;  c < m_spec.nchannels;  ++c)
            m_spec.channelnames[c] = "z";
    }

    unsigned short *sampleinfo = NULL;
    unsigned short extrasamples = 0;
    TIFFGetFieldDefaulted (m_tif, TIFFTAG_EXTRASAMPLES, &extrasamples, &sampleinfo);
    // std::cerr << "Extra samples = " << extrasamples << "\n";
    bool alpha_is_unassociated = false;  // basic assumption
    if (extrasamples) {
        // If the TIFF ExtraSamples tag was specified, use that to figure
        // out the meaning of alpha.
        int colorchannels = 3;
        if (m_photometric == PHOTOMETRIC_MINISWHITE ||
              m_photometric == PHOTOMETRIC_MINISBLACK ||
              m_photometric == PHOTOMETRIC_PALETTE ||
              m_photometric == PHOTOMETRIC_MASK)
            colorchannels = 1;
        for (int i = 0, c = colorchannels;
             i < extrasamples && c < m_spec.nchannels;  ++i, ++c) {
            // std::cerr << "   extra " << i << " " << sampleinfo[i] << "\n";
            if (sampleinfo[i] == EXTRASAMPLE_ASSOCALPHA) {
                // This is the alpha channel, associated as usual
                m_spec.alpha_channel = c;
            } else if (sampleinfo[i] == EXTRASAMPLE_UNASSALPHA) {
                // This is the alpha channel, but color is unassociated
                m_spec.alpha_channel = c;
                alpha_is_unassociated = true;
                m_spec.attribute ("oiio:UnassociatedAlpha", 1);
            } else {
                DASSERT (sampleinfo[i] == EXTRASAMPLE_UNSPECIFIED);
                // This extra channel is not alpha at all.  Undo any
                // assumptions we previously made about this channel.
                if (m_spec.alpha_channel == c) {
                    m_spec.channelnames[c] = Strutil::format("channel%d", c);
                    m_spec.alpha_channel = -1;
                }
            }
        }
        if (m_spec.alpha_channel >= 0)
            m_spec.channelnames[m_spec.alpha_channel] = "A";
    }
    // Will we need to do alpha conversions?
    m_convert_alpha = (m_spec.alpha_channel >= 0 && alpha_is_unassociated &&
                       ! m_keep_unassociated_alpha);

    // N.B. we currently ignore the following TIFF fields:
    // GrayResponseCurve GrayResponseUnit
    // MaxSampleValue MinSampleValue
    // NewSubfileType SubfileType(deprecated)
    // Colorimetry fields

    // Search for an EXIF IFD in the TIFF file, and if found, rummage 
    // around for Exif fields.
#if TIFFLIB_VERSION > 20050912    /* compat with old TIFF libs - skip Exif */
    int exifoffset = 0;
    if (TIFFGetField (m_tif, TIFFTAG_EXIFIFD, &exifoffset) &&
            TIFFReadEXIFDirectory (m_tif, exifoffset)) {
        for (int i = 0;  exif_tag_table[i].name;  ++i)
            find_tag (exif_tag_table[i].tifftag, exif_tag_table[i].tifftype,
                      exif_tag_table[i].name);
        // I'm not sure what state TIFFReadEXIFDirectory leaves us in,
        // so to be safe, close and re-seek.
        TIFFClose (m_tif);
#ifdef _WIN32
        std::wstring wfilename = Filesystem::path_to_windows_native (m_filename);
        m_tif = TIFFOpenW (wfilename.c_str(), "rm");
#else
        m_tif = TIFFOpen (m_filename.c_str(), "rm");
#endif
        TIFFSetDirectory (m_tif, m_subimage);

        // A few tidbits to look for
        ImageIOParameter *p;
        if ((p = m_spec.find_attribute ("Exif:ColorSpace", TypeDesc::INT))) {
            // Exif spec says that anything other than 0xffff==uncalibrated
            // should be interpreted to be sRGB.
            if (*(const int *)p->data() != 0xffff)
                m_spec.attribute ("oiio:ColorSpace", "sRGB");
        }
    }
#endif

#if TIFFLIB_VERSION >= 20051230
    // Search for IPTC metadata in IIM form -- but older versions of
    // libtiff botch the size, so ignore it for very old libtiff.
    int iptcsize = 0;
    const void *iptcdata = NULL;
    if (TIFFGetField (m_tif, TIFFTAG_RICHTIFFIPTC, &iptcsize, &iptcdata)) {
        std::vector<uint32> iptc ((uint32 *)iptcdata, (uint32 *)iptcdata+iptcsize);
        if (TIFFIsByteSwapped (m_tif))
            TIFFSwabArrayOfLong ((uint32*)&iptc[0], iptcsize);
        decode_iptc_iim (&iptc[0], iptcsize*4, m_spec);
    }
#endif

    // Search for an XML packet containing XMP (IPTC, Exif, etc.)
    int xmlsize = 0;
    const void *xmldata = NULL;
    if (TIFFGetField (m_tif, TIFFTAG_XMLPACKET, &xmlsize, &xmldata)) {
        // std::cerr << "Found XML data, size " << xmlsize << "\n";
        if (xmldata && xmlsize) {
            std::string xml ((const char *)xmldata, xmlsize);
            decode_xmp (xml, m_spec);
        }
    }

#if 0
    // Experimental -- look for photoshop data
    int photoshopsize = 0;
    const void *photoshopdata = NULL;
    if (TIFFGetField (m_tif, TIFFTAG_PHOTOSHOP, &photoshopsize, &photoshopdata)) {
        std::cerr << "Found PHOTOSHOP data, size " << photoshopsize << "\n";
        if (photoshopdata && photoshopsize) {
//            std::string photoshop ((const char *)photoshopdata, photoshopsize);
//            std::cerr << "PHOTOSHOP:\n" << photoshop << "\n---\n";
        }
    }
#endif
}
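
The m_keep_unassociated_alpha flag consulted above is presumably driven by the "oiio:UnassociatedAlpha" open hint, the same hint Example #27 below passes. A minimal sketch of a caller requesting that behavior; the helper name and filename are placeholders, and the raw-pointer (OIIO 1.x) API is assumed.

#include <string>
#include <OpenImageIO/imageio.h>
OIIO_NAMESPACE_USING

bool open_keeping_unassociated_alpha (const std::string &filename, ImageSpec &spec)
{
    ImageSpec config;
    config.attribute ("oiio:UnassociatedAlpha", 1);  // please do not premultiply
    ImageInput *in = ImageInput::create (filename);
    if (! in || ! in->open (filename, spec, config)) {
        delete in;
        return false;
    }
    // When the file really carried unassociated alpha, the reader says so:
    // spec.get_int_attribute ("oiio:UnassociatedAlpha", 0) will be nonzero.
    in->close ();
    delete in;
    return true;
}
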
Example #22
bool
GIFInput::read_subimage_metadata (ImageSpec &newspec,
                                  GifColorType **local_colormap)
{
    newspec = ImageSpec (TypeDesc::UINT8);
    newspec.nchannels = 3;

    GifRecordType m_gif_rec_type;
    do {
        if (DGifGetRecordType (m_gif_file, &m_gif_rec_type) == GIF_ERROR) {
            report_last_error ();
            return false;
        }

        switch (m_gif_rec_type) {
        case IMAGE_DESC_RECORD_TYPE:
            if (DGifGetImageDesc (m_gif_file) == GIF_ERROR) {
                report_last_error ();
                return false;
            }

            newspec.width = m_gif_file->Image.Width;
            newspec.height = m_gif_file->Image.Height;

            // read local colormap
            if (m_gif_file->Image.ColorMap != NULL) {
                *local_colormap = m_gif_file->Image.ColorMap->Colors;
            } else {
                *local_colormap = NULL;
            }

            break;

        case EXTENSION_RECORD_TYPE:
            int ext_code;
            GifByteType *ext;
            if (DGifGetExtension(m_gif_file, &ext_code, &ext) == GIF_ERROR)
            {
                report_last_error ();
                return false;
            }
            read_gif_extension (ext_code, ext, newspec);

            while (ext != NULL)
            {
                if (DGifGetExtensionNext(m_gif_file, &ext) == GIF_ERROR)
                {
                    report_last_error ();
                    return false;
                }

                if (ext != NULL) {
                    read_gif_extension (ext_code, ext, newspec);
                }
            }

            break;

        case TERMINATE_RECORD_TYPE:
            return false;
            break;

        default:
            break;
        }
    } while (m_gif_rec_type != IMAGE_DESC_RECORD_TYPE);

    newspec.attribute ("gif:Interlacing", m_gif_file->Image.Interlace);

    newspec.default_channel_names ();
    if (newspec.nchannels == 4) {
        newspec.alpha_channel = 3;    // zero-based index of the "A" channel
    }

    return true;
}
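
A note on the channel bookkeeping above, assuming OIIO's usual zero-based channel indexing and that an extension record is what raises nchannels to 4:

// For a 4-channel spec, default_channel_names() yields {"R","G","B","A"} and
// records the alpha channel itself, so the expected end state is simply
//     newspec.channelnames == {"R", "G", "B", "A"}
//     newspec.alpha_channel == 3        // zero-based index of "A"
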
Example #23
bool
RawInput::open_raw (bool unpack, const std::string &name,
                    const ImageSpec &config)
{
    // std::cout << "open_raw " << name << " unpack=" << unpack << "\n";
    ASSERT (! m_processor);
    m_processor.reset (new LibRaw);

    // Temp spec for exif parser callback to dump into
    ImageSpec exifspec;
#if LIBRAW_VERSION >= LIBRAW_MAKE_VERSION(0,17,0)
    m_processor->set_exifparser_handler ((exif_parser_callback)exif_parser_cb,
                                        &exifspec);
#endif

    int ret;
    if ( (ret = m_processor->open_file(name.c_str()) ) != LIBRAW_SUCCESS) {
        error ("Could not open file \"%s\", %s", m_filename, libraw_strerror(ret));
        return false;
    }

    ASSERT (!m_unpacked);
    if (unpack) {
        if ( (ret = m_processor->unpack() ) != LIBRAW_SUCCESS) {
            error ("Could not unpack \"%s\", %s", m_filename, libraw_strerror(ret));
            return false;
        }
    }
    m_processor->adjust_sizes_info_only();

    // Set file information
    m_spec = ImageSpec(m_processor->imgdata.sizes.iwidth,
                       m_processor->imgdata.sizes.iheight,
                       3, // LibRaw should only give us 3 channels
                       TypeDesc::UINT16);
    // Move the exif attribs we already read into the spec we care about
    m_spec.extra_attribs.swap (exifspec.extra_attribs);

    // Output 16 bit images
    m_processor->imgdata.params.output_bps = 16;

    // Set the gamma curve to Linear
    m_spec.attribute("oiio:ColorSpace","Linear");
    m_processor->imgdata.params.gamm[0] = 1.0;
    m_processor->imgdata.params.gamm[1] = 1.0;

    // Disable exposure correction (unless config "raw:auto_bright" == 1)
    m_processor->imgdata.params.no_auto_bright =
        ! config.get_int_attribute("raw:auto_bright", 0);
    // Use camera white balance if "raw:use_camera_wb" is not 0
    m_processor->imgdata.params.use_camera_wb =
        config.get_int_attribute("raw:use_camera_wb", 1);
    // Turn off maximum threshold value (unless set to non-zero)
    m_processor->imgdata.params.adjust_maximum_thr =
        config.get_float_attribute("raw:adjust_maximum_thr", 0.0f);
    // Set camera maximum value if "raw:user_sat" is not 0
    m_processor->imgdata.params.user_sat =
        config.get_int_attribute("raw:user_sat", 0);

    // Use embedded color profile. Values mean:
    // 0: do not use embedded color profile
    // 1 (default): use embedded color profile (if present) for DNG files
    //    (always), for other files only if use_camera_wb is set.
    // 3: use embedded color data (if present) regardless of white
    //    balance setting.
    m_processor->imgdata.params.use_camera_matrix =
        config.get_int_attribute("raw:use_camera_matrix", 1);


    // Check to see if the user has explicitly set the output colorspace primaries
    std::string cs = config.get_string_attribute ("raw:ColorSpace", "sRGB");
    if (cs.size()) {
        static const char *colorspaces[] = { "raw",
                                             "sRGB",
                                             "Adobe",
                                             "Wide",
                                             "ProPhoto",
                                             "XYZ",
#if LIBRAW_VERSION >= LIBRAW_MAKE_VERSION(0,18,0)
                                             "ACES",
#endif
                                             NULL
                                             };

        size_t c;
        for (c=0; colorspaces[c]; c++)
            if (Strutil::iequals (cs, colorspaces[c]))
                break;
        if (colorspaces[c])
            m_processor->imgdata.params.output_color = c;
        else {
#if LIBRAW_VERSION < LIBRAW_MAKE_VERSION(0,18,0)
            if (cs == "ACES")
                error ("raw:ColorSpace value of \"ACES\" is not supported by libRaw %d.%d.%d",
                       LIBRAW_MAJOR_VERSION, LIBRAW_MINOR_VERSION,
                       LIBRAW_PATCH_VERSION);
            else
#endif
            error("raw:ColorSpace set to unknown value");
            return false;
        }
        // Set the attribute in the output spec
        m_spec.attribute("raw:ColorSpace", cs);
    } else {
        // By default we use sRGB primaries for simplicity
        m_processor->imgdata.params.output_color = 1;
        m_spec.attribute("raw:ColorSpace", "sRGB");
    }

    // Exposure adjustment
    float exposure = config.get_float_attribute ("raw:Exposure", -1.0f);
    if (exposure >= 0.0f) {
        if (exposure < 0.25f || exposure > 8.0f) {
            error("raw:Exposure invalid value; valid range is 0.25 - 8.0");
            return false;
        }
        m_processor->imgdata.params.exp_correc = 1; // enable exposure correction
        m_processor->imgdata.params.exp_shift = exposure; // set exposure correction
        // Set the attribute in the output spec
        m_spec.attribute ("raw:Exposure", exposure);
    }

    // Highlight adjustment
    int highlight_mode = config.get_int_attribute("raw:HighlightMode", 0);
    if (highlight_mode != 0)
    {
        if (highlight_mode < 0 || highlight_mode > 9) {
            error("raw:HighlightMode invalid value; valid range is 0 - 9");
            return false;
        }
        m_processor->imgdata.params.highlight = highlight_mode;
        m_spec.attribute ("raw:HighlightMode", highlight_mode);
    }

    // Interpolation quality
    // note: LibRaw must be compiled with demosaic pack GPL2 to use demosaic
    // algorithms 5-9. It must be compiled with demosaic pack GPL3 for
    // algorithm 10 (AMAzE). If either of these packs are not included, it
    // will silently use option 3 - AHD.
    std::string demosaic = config.get_string_attribute ("raw:Demosaic");
    if (demosaic.size()) {
        static const char *demosaic_algs[] = { "linear",
                                               "VNG",
                                               "PPG",
                                               "AHD",
                                               "DCB",
                                               "AHD-Mod",
                                               "AFD",
                                               "VCD",
                                               "Mixed",
                                               "LMMSE",
                                               "AMaZE",
#if LIBRAW_VERSION >= LIBRAW_MAKE_VERSION(0,16,0)
                                               "DHT",
                                               "AAHD",
#endif
                                               // Future demosaicing algorithms should go here
                                               NULL
                                               };
        size_t d;
        for (d=0; demosaic_algs[d]; d++)
            if (Strutil::iequals (demosaic, demosaic_algs[d]))
                break;
        if (demosaic_algs[d])
            m_processor->imgdata.params.user_qual = d;
        else if (Strutil::iequals (demosaic, "none")) {
#ifdef LIBRAW_DECODER_FLATFIELD
            // See if we can access the Bayer patterned data for this raw file
            libraw_decoder_info_t decoder_info;
            m_processor->get_decoder_info(&decoder_info);
            if (!(decoder_info.decoder_flags & LIBRAW_DECODER_FLATFIELD)) {
                error("Unable to extract unbayered data from file \"%s\"", name.c_str());
                return false;
            }

#endif
            // User has selected no demosaicing, so no processing needs to be done
            m_process = false;

            // This will read back a single, bayered channel
            m_spec.nchannels = 1;
            m_spec.channelnames.clear();
            m_spec.channelnames.emplace_back("Y");

            // Also, any previously set demosaicing options are void, so remove them
            m_spec.erase_attribute("oiio:Colorspace");
            m_spec.erase_attribute("raw:Colorspace");
            m_spec.erase_attribute("raw:Exposure");
        }
        else {
            error("raw:Demosaic set to unknown value");
            return false;
        }
        // Set the attribute in the output spec
        m_spec.attribute("raw:Demosaic", demosaic);
    } else {
        m_processor->imgdata.params.user_qual = 3;
        m_spec.attribute("raw:Demosaic", "AHD");
    }

    // Metadata

    const libraw_image_sizes_t &sizes (m_processor->imgdata.sizes);
    m_spec.attribute ("PixelAspectRatio", (float)sizes.pixel_aspect);
    // FIXME: sizes. top_margin, left_margin, raw_pitch, mask?

    const libraw_iparams_t &idata (m_processor->imgdata.idata);
    if (idata.make[0])
        m_spec.attribute ("Make", idata.make);
    if (idata.model[0])
        m_spec.attribute ("Model", idata.model);
    // FIXME: idata. dng_version, is_foveon, colors, filters, cdesc

    const libraw_colordata_t &color (m_processor->imgdata.color);
    m_spec.attribute("Exif:Flash", (int) color.flash_used);
    if (color.model2[0])
        m_spec.attribute ("Software", color.model2);

    // FIXME -- all sorts of things in this struct

    const libraw_imgother_t &other (m_processor->imgdata.other);
    m_spec.attribute ("Exif:ISOSpeedRatings", (int) other.iso_speed);
    m_spec.attribute ("ExposureTime", other.shutter);
    m_spec.attribute ("Exif:ShutterSpeedValue", -log2f(other.shutter));
    m_spec.attribute ("FNumber", other.aperture);
    m_spec.attribute ("Exif:ApertureValue", 2.0f * log2f(other.aperture));
    m_spec.attribute ("Exif:FocalLength", other.focal_len);
    struct tm * m_tm = localtime(&m_processor->imgdata.other.timestamp);
    char datetime[20];
    strftime (datetime, 20, "%Y-%m-%d %H:%M:%S", m_tm);
    m_spec.attribute ("DateTime", datetime);
    // FIXME: other.shot_order
    // FIXME: other.gpsdata
    if (other.desc[0])
        m_spec.attribute ("ImageDescription", other.desc);
    if (other.artist[0])
        m_spec.attribute ("Artist", other.artist);

    // libraw reoriented the image for us, so squash any orientation
    // metadata we may have found in the Exif. Preserve the original as
    // "raw:Orientation".
    m_spec.attribute ("raw:Orientation", m_spec.get_int_attribute("Orientation", 1));
    m_spec.attribute ("Orientation", 1);

    // FIXME -- thumbnail possibly in m_processor->imgdata.thumbnail

    return true;
}
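
A minimal sketch of steering these LibRaw options from the outside. The attribute names are exactly the ones the config.get_*_attribute() calls above look for; the helper name and filename are placeholders, and the raw-pointer (OIIO 1.x) API is assumed.

#include <string>
#include <OpenImageIO/imageio.h>
OIIO_NAMESPACE_USING

bool open_raw_as_aces (const std::string &filename, ImageSpec &spec)
{
    ImageSpec config;
    config.attribute ("raw:ColorSpace", "ACES");   // output primaries (LibRaw >= 0.18)
    config.attribute ("raw:Demosaic", "AHD");      // demosaicing algorithm
    config.attribute ("raw:auto_bright", 0);       // leave auto-brightening off (the default)
    ImageInput *in = ImageInput::create (filename);
    if (! in || ! in->open (filename, spec, config)) {
        delete in;
        return false;
    }
    in->close ();
    delete in;
    return true;
}
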
Example #24
void
TIFFInput::readspec ()
{
    uint32 width = 0, height = 0, depth = 0;
    unsigned short nchans = 1;
    TIFFGetField (m_tif, TIFFTAG_IMAGEWIDTH, &width);
    TIFFGetField (m_tif, TIFFTAG_IMAGELENGTH, &height);
    TIFFGetFieldDefaulted (m_tif, TIFFTAG_IMAGEDEPTH, &depth);
    TIFFGetFieldDefaulted (m_tif, TIFFTAG_SAMPLESPERPIXEL, &nchans);

    m_spec = ImageSpec ((int)width, (int)height, (int)nchans);

    float x = 0, y = 0;
    TIFFGetField (m_tif, TIFFTAG_XPOSITION, &x);
    TIFFGetField (m_tif, TIFFTAG_YPOSITION, &y);
    m_spec.x = (int)x;
    m_spec.y = (int)y;
    m_spec.z = 0;
    // FIXME? - TIFF spec describes the positions as in resolutionunit.
    // What happens if this is not unitless pixels?  Are we interpreting
    // it all wrong?

    if (TIFFGetField (m_tif, TIFFTAG_PIXAR_IMAGEFULLWIDTH, &width) == 1
          && width > 0)
        m_spec.full_width = width;
    if (TIFFGetField (m_tif, TIFFTAG_PIXAR_IMAGEFULLLENGTH, &height) == 1
          && height > 0)
        m_spec.full_height = height;

    if (TIFFIsTiled (m_tif)) {
        TIFFGetField (m_tif, TIFFTAG_TILEWIDTH, &m_spec.tile_width);
        TIFFGetField (m_tif, TIFFTAG_TILELENGTH, &m_spec.tile_height);
        TIFFGetFieldDefaulted (m_tif, TIFFTAG_TILEDEPTH, &m_spec.tile_depth);
    } else {
        m_spec.tile_width = 0;
        m_spec.tile_height = 0;
        m_spec.tile_depth = 0;
    }

    m_bitspersample = 8;
    TIFFGetField (m_tif, TIFFTAG_BITSPERSAMPLE, &m_bitspersample);
    m_spec.attribute ("oiio:BitsPerSample", (int)m_bitspersample);

    unsigned short sampleformat = SAMPLEFORMAT_UINT;
    TIFFGetFieldDefaulted (m_tif, TIFFTAG_SAMPLEFORMAT, &sampleformat);
    switch (m_bitspersample) {
    case 1:
    case 2:
    case 4:
        // Make 1, 2, 4 bpp look like byte images
    case 8:
        if (sampleformat == SAMPLEFORMAT_UINT)
            m_spec.set_format (TypeDesc::UINT8);
        else if (sampleformat == SAMPLEFORMAT_INT)
            m_spec.set_format (TypeDesc::INT8);
        else m_spec.set_format (TypeDesc::UINT8);  // punt
        break;
    case 16:
        if (sampleformat == SAMPLEFORMAT_UINT)
            m_spec.set_format (TypeDesc::UINT16);
        else if (sampleformat == SAMPLEFORMAT_INT)
            m_spec.set_format (TypeDesc::INT16);
        break;
    case 32:
        if (sampleformat == SAMPLEFORMAT_IEEEFP)
            m_spec.set_format (TypeDesc::FLOAT);
        break;
    case 64:
        if (sampleformat == SAMPLEFORMAT_IEEEFP)
            m_spec.set_format (TypeDesc::DOUBLE);
        break;
    default:
        m_spec.set_format (TypeDesc::UNKNOWN);
        break;
    }

    // Use the table for all the obvious things that can be mindlessly
    // shoved into the image spec.
    for (int i = 0;  tiff_tag_table[i].name;  ++i)
        find_tag (tiff_tag_table[i].tifftag,
                  tiff_tag_table[i].tifftype, tiff_tag_table[i].name);

    // Now we need to get fields "by hand" for anything else that is less
    // straightforward...

    m_photometric = (m_spec.nchannels == 1 ? PHOTOMETRIC_MINISBLACK : PHOTOMETRIC_RGB);
    TIFFGetField (m_tif, TIFFTAG_PHOTOMETRIC, &m_photometric);
    m_spec.attribute ("tiff:PhotometricInterpretation", (int)m_photometric);
    if (m_photometric == PHOTOMETRIC_PALETTE) {
        // Read the color map
        unsigned short *r = NULL, *g = NULL, *b = NULL;
        TIFFGetField (m_tif, TIFFTAG_COLORMAP, &r, &g, &b);
        ASSERT (r != NULL && g != NULL && b != NULL);
        m_colormap.clear ();
        m_colormap.insert (m_colormap.end(), r, r + (1 << m_bitspersample));
        m_colormap.insert (m_colormap.end(), g, g + (1 << m_bitspersample));
        m_colormap.insert (m_colormap.end(), b, b + (1 << m_bitspersample));
        // Palette TIFF images are always 3 channels (to the client)
        m_spec.nchannels = 3;
        m_spec.default_channel_names ();
    }

    TIFFGetFieldDefaulted (m_tif, TIFFTAG_PLANARCONFIG, &m_planarconfig);
    m_spec.attribute ("tiff:PlanarConfiguration", (int)m_planarconfig);
    if (m_planarconfig == PLANARCONFIG_SEPARATE)
        m_spec.attribute ("planarconfig", "separate");
    else
        m_spec.attribute ("planarconfig", "contig");

    int compress = 0;
    TIFFGetFieldDefaulted (m_tif, TIFFTAG_COMPRESSION, &compress);
    m_spec.attribute ("tiff:Compression", compress);
    switch (compress) {
    case COMPRESSION_NONE :
        m_spec.attribute ("compression", "none");
        break;
    case COMPRESSION_LZW :
        m_spec.attribute ("compression", "lzw");
        break;
    case COMPRESSION_CCITTRLE :
        m_spec.attribute ("compression", "ccittrle");
        break;
    case COMPRESSION_DEFLATE :
    case COMPRESSION_ADOBE_DEFLATE :
        m_spec.attribute ("compression", "zip");
        break;
    case COMPRESSION_PACKBITS :
        m_spec.attribute ("compression", "packbits");
        break;
    default:
        break;
    }

    int rowsperstrip = -1;
    if (! m_spec.tile_width) {
        TIFFGetField (m_tif, TIFFTAG_ROWSPERSTRIP, &rowsperstrip);
        if (rowsperstrip > 0)
            m_spec.attribute ("tiff:RowsPerStrip", rowsperstrip);
    }

    // The libtiff docs say that only uncompressed images, or those with
    // rowsperstrip==1, support random access to scanlines.
    m_no_random_access = (compress != COMPRESSION_NONE && rowsperstrip != 1);

    short resunit = -1;
    TIFFGetField (m_tif, TIFFTAG_RESOLUTIONUNIT, &resunit);
    switch (resunit) {
    case RESUNIT_NONE : m_spec.attribute ("ResolutionUnit", "none"); break;
    case RESUNIT_INCH : m_spec.attribute ("ResolutionUnit", "in"); break;
    case RESUNIT_CENTIMETER : m_spec.attribute ("ResolutionUnit", "cm"); break;
    }

    get_matrix_attribute ("worldtocamera", TIFFTAG_PIXAR_MATRIX_WORLDTOCAMERA);
    get_matrix_attribute ("worldtoscreen", TIFFTAG_PIXAR_MATRIX_WORLDTOSCREEN);
    get_int_attribute ("tiff:subfiletype", TIFFTAG_SUBFILETYPE);
    // FIXME -- should subfiletype be "conventionized" and used for all
    // plugins uniformly? 

    // FIXME: do we care about fillorder for 1-bit and 4-bit images?

    // Special names for shadow maps
    char *s = NULL;
    TIFFGetField (m_tif, TIFFTAG_PIXAR_TEXTUREFORMAT, &s);
    if (s)
        m_emulate_mipmap = true;
    if (s && ! strcmp (s, "Shadow")) {
        for (int c = 0;  c < m_spec.nchannels;  ++c)
            m_spec.channelnames[c] = "z";
    }

    // N.B. we currently ignore the following TIFF fields:
    // ExtraSamples
    // GrayResponseCurve GrayResponseUnit
    // MaxSampleValue MinSampleValue
    // NewSubfileType SubfileType(deprecated)
    // Colorimetry fields

    // Search for an EXIF IFD in the TIFF file, and if found, rummage 
    // around for Exif fields.
#if TIFFLIB_VERSION > 20050912    /* compat with old TIFF libs - skip Exif */
    int exifoffset = 0;
    if (TIFFGetField (m_tif, TIFFTAG_EXIFIFD, &exifoffset) &&
            TIFFReadEXIFDirectory (m_tif, exifoffset)) {
        for (int i = 0;  exif_tag_table[i].name;  ++i)
            find_tag (exif_tag_table[i].tifftag, exif_tag_table[i].tifftype,
                      exif_tag_table[i].name);
        // I'm not sure what state TIFFReadEXIFDirectory leaves us in,
        // so to be safe, close and re-seek.
        TIFFClose (m_tif);
        m_tif = TIFFOpen (m_filename.c_str(), "rm");
        TIFFSetDirectory (m_tif, m_subimage);

        // A few tidbits to look for
        ImageIOParameter *p;
        if ((p = m_spec.find_attribute ("Exif:ColorSpace", TypeDesc::INT))) {
            // Exif spec says that anything other than 0xffff==uncalibrated
            // should be interpreted to be sRGB.
            if (*(const int *)p->data() != 0xffff)
                m_spec.attribute ("oiio:ColorSpace", "sRGB");
        }
    }
#endif

#if TIFFLIB_VERSION >= 20051230
    // Search for IPTC metadata in IIM form -- but older versions of
    // libtiff botch the size, so ignore it for very old libtiff.
    int iptcsize = 0;
    const void *iptcdata = NULL;
    if (TIFFGetField (m_tif, TIFFTAG_RICHTIFFIPTC, &iptcsize, &iptcdata)) {
        std::vector<uint32> iptc ((uint32 *)iptcdata, (uint32 *)iptcdata+iptcsize);
        if (TIFFIsByteSwapped (m_tif))
            TIFFSwabArrayOfLong ((uint32*)&iptc[0], iptcsize);
        decode_iptc_iim (&iptc[0], iptcsize*4, m_spec);
    }
#endif

    // Search for an XML packet containing XMP (IPTC, Exif, etc.)
    int xmlsize = 0;
    const void *xmldata = NULL;
    if (TIFFGetField (m_tif, TIFFTAG_XMLPACKET, &xmlsize, &xmldata)) {
        // std::cerr << "Found XML data, size " << xmlsize << "\n";
        if (xmldata && xmlsize) {
            std::string xml ((const char *)xmldata, xmlsize);
            decode_xmp (xml, m_spec);
        }
    }

#if 0
    // Experimental -- look for photoshop data
    int photoshopsize = 0;
    const void *photoshopdata = NULL;
    if (TIFFGetField (m_tif, TIFFTAG_PHOTOSHOP, &photoshopsize, &photoshopdata)) {
        std::cerr << "Found PHOTOSHOP data, size " << photoshopsize << "\n";
        if (photoshopdata && photoshopsize) {
//            std::string photoshop ((const char *)photoshopdata, photoshopsize);
//            std::cerr << "PHOTOSHOP:\n" << photoshop << "\n---\n";
        }
    }
#endif
}
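
A minimal sketch of how a caller typically uses the tile fields this readspec() fills in, choosing between tile and scanline reads; the helper is hypothetical and assumes the raw-pointer (OIIO 1.x) API.

#include <vector>
#include <OpenImageIO/imageio.h>
OIIO_NAMESPACE_USING

// Read the first tile or scanline of an already-open input: nonzero
// tile_width means a tiled TIFF, otherwise the file is strip-organized.
static bool read_first_chunk (ImageInput *in)
{
    const ImageSpec &spec = in->spec ();
    std::vector<unsigned char> buf;
    if (spec.tile_width) {
        buf.resize (spec.tile_bytes ());
        return in->read_tile (spec.x, spec.y, spec.z, spec.format, &buf[0]);
    }
    buf.resize (spec.scanline_bytes ());
    return in->read_scanline (spec.y, spec.z, spec.format, &buf[0]);
}
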
Example #25
bool
OpenEXRInput::open (const std::string &name, ImageSpec &newspec)
{
    // Quick check to reject non-exr files
    if (! Filesystem::is_regular (name)) {
        error ("Could not open file \"%s\"", name.c_str());
        return false;
    }
    bool tiled;
    if (! Imf::isOpenExrFile (name.c_str(), tiled)) {
        error ("\"%s\" is not an OpenEXR file", name.c_str());
        return false;
    }

    pvt::set_exr_threads ();

    m_spec = ImageSpec(); // Clear everything with default constructor
    
    try {
        m_input_stream = new OpenEXRInputStream (name.c_str());
    }
    catch (const std::exception &e) {
        m_input_stream = NULL;
        error ("OpenEXR exception: %s", e.what());
        return false;
    }

#ifdef USE_OPENEXR_VERSION2
    try {
        m_input_multipart = new Imf::MultiPartInputFile (*m_input_stream);
    }
    catch (const std::exception &e) {
        delete m_input_stream;
        m_input_stream = NULL;
        error ("OpenEXR exception: %s", e.what());
        return false;
    }

    m_nsubimages = m_input_multipart->parts();

#else
    try {
        if (tiled) {
            m_input_tiled = new Imf::TiledInputFile (*m_input_stream);
        } else {
            m_input_scanline = new Imf::InputFile (*m_input_stream);
        }
    }
    catch (const std::exception &e) {
        delete m_input_stream;
        m_input_stream = NULL;
        error ("OpenEXR exception: %s", e.what());
        return false;
    }

    if (! m_input_scanline && ! m_input_tiled) {
        error ("Unknown error opening EXR file");
        return false;
    }

    m_nsubimages = 1;  // OpenEXR 1.x did not have multipart
#endif

    m_parts.resize (m_nsubimages);
    m_subimage = -1;
    m_miplevel = -1;
    bool ok = seek_subimage (0, 0, newspec);
    if (! ok)
        close ();
    return ok;
}
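
A minimal sketch of walking the parts that open() exposes as subimages, using the same three-argument seek_subimage() called above; the helper name and filename are placeholders.

#include <iostream>
#include <string>
#include <OpenImageIO/imageio.h>
OIIO_NAMESPACE_USING

void list_exr_parts (const std::string &filename)
{
    ImageInput *in = ImageInput::open (filename);
    if (! in)
        return;
    ImageSpec spec;
    for (int s = 0;  in->seek_subimage (s, 0, spec);  ++s)
        std::cout << "part " << s << ": " << spec.width << " x " << spec.height
                  << ", " << spec.nchannels << " channels\n";
    in->close ();
    delete in;
}
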
Example #26
bool
SgiInput::open (const std::string &name, ImageSpec &spec)
{
    // saving name for later use
    m_filename = name;

    m_fd = Filesystem::fopen (m_filename, "rb");
    if (!m_fd) {
        error ("Could not open file \"%s\"", name.c_str());
        return false;
    }

    if (! read_header ())
        return false;

    if (m_sgi_header.magic != sgi_pvt::SGI_MAGIC) {
        error ("\"%s\" is not an SGI file, magic number doesn't match",
               m_filename.c_str());
        close ();
        return false;
    }

    int height = 0;
    int nchannels = 0;
    switch (m_sgi_header.dimension) {
        case sgi_pvt::ONE_SCANLINE_ONE_CHANNEL:
          height = 1;
          nchannels = 1;
          break;
        case sgi_pvt::MULTI_SCANLINE_ONE_CHANNEL:
          height = m_sgi_header.ysize;
          nchannels = 1;
          break;
        case sgi_pvt::MULTI_SCANLINE_MULTI_CHANNEL:
          height = m_sgi_header.ysize;
          nchannels = m_sgi_header.zsize;
          break;
        default:
          error ("Bad dimension: %d", m_sgi_header.dimension);
          close ();
          return false;
    }

    if (m_sgi_header.colormap == sgi_pvt::COLORMAP
            || m_sgi_header.colormap == sgi_pvt::SCREEN) {
        error ("COLORMAP and SCREEN color map types aren't supported");
        close ();
        return false;
    }

    m_spec = ImageSpec (m_sgi_header.xsize, height, nchannels,
                        m_sgi_header.bpc == 1 ? TypeDesc::UINT8 : TypeDesc::UINT16);
    if (strlen (m_sgi_header.imagename))
        m_spec.attribute("ImageDescription", m_sgi_header.imagename);

    if (m_sgi_header.storage == sgi_pvt::RLE) {
        m_spec.attribute("compression", "rle");
        if (! read_offset_tables ())
            return false;
    }

    spec = m_spec;
    return true;
}
Example #27
bool ImageManager::file_load_image_generic(Image *img,
                                           ImageInput **in,
                                           int &width,
                                           int &height,
                                           int &depth,
                                           int &components)
{
	if(img->filename == "")
		return false;

	if(!img->builtin_data) {
		/* NOTE: Error logging is done in meta data acquisition. */
		if(!path_exists(img->filename) || path_is_directory(img->filename)) {
			return false;
		}

		/* load image from file through OIIO */
		*in = ImageInput::create(img->filename);

		if(!*in)
			return false;

		ImageSpec spec = ImageSpec();
		ImageSpec config = ImageSpec();

		if(img->use_alpha == false)
			config.attribute("oiio:UnassociatedAlpha", 1);

		if(!(*in)->open(img->filename, spec, config)) {
			delete *in;
			*in = NULL;
			return false;
		}

		width = spec.width;
		height = spec.height;
		depth = spec.depth;
		components = spec.nchannels;
	}
	else {
		/* load image using builtin images callbacks */
		if(!builtin_image_info_cb || !builtin_image_pixels_cb)
			return false;

		ImageMetaData metadata;
		builtin_image_info_cb(img->filename, img->builtin_data, metadata);

		width = metadata.width;
		height = metadata.height;
		depth = metadata.depth;
		components = metadata.channels;
	}

	/* we only handle a certain number of components */
	if(!(components >= 1 && components <= 4)) {
		if(*in) {
			(*in)->close();
			delete *in;
			*in = NULL;
		}

		return false;
	}

	return true;
}
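file_load_image_generic() only opens the file and reports its dimensions; the actual pixel transfer happens afterwards, either through the returned ImageInput or through builtin_image_pixels_cb for builtin images. A minimal sketch of the OIIO half of that pairing (hypothetical caller; the real Cycles loader converts into its own device-friendly layouts):

	ImageInput *in = NULL;
	int width = 0, height = 0, depth = 0, components = 0;
	if(file_load_image_generic(img, &in, width, height, depth, components) && in) {
		/* read the whole image as float; depth > 1 only for volume formats */
		std::vector<float> pixels((size_t)width * height * depth * components);
		in->read_image(TypeDesc::FLOAT, &pixels[0]);
		in->close();
		delete in;
	}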
Example #28
bool
SocketInput::open (const std::string &name, ImageSpec &newspec)
{
    return open (name, newspec, ImageSpec());
}
Example #29
bool
JpgInput::open (const std::string &name, ImageSpec &newspec)
{
    // Check that file exists and can be opened
    m_filename = name;
    m_fd = Filesystem::fopen (name, "rb");
    if (m_fd == NULL) {
        error ("Could not open file \"%s\"", name.c_str());
        return false;
    }

    // Check magic number to assure this is a JPEG file
    uint8_t magic[2] = {0, 0};
    if (fread (magic, sizeof(magic), 1, m_fd) != 1) {
        error ("Empty file \"%s\"", name.c_str());
        close_file ();
        return false;
    }

    rewind (m_fd);
    if (magic[0] != JPEG_MAGIC1 || magic[1] != JPEG_MAGIC2) {
        close_file ();
        error ("\"%s\" is not a JPEG file, magic number doesn't match (was 0x%x%x)",
               name.c_str(), int(magic[0]), int(magic[1]));
        return false;
    }

    // Set up the normal JPEG error routines, then override error_exit and
    // output_message so we intercept all the errors.
    m_cinfo.err = jpeg_std_error ((jpeg_error_mgr *)&m_jerr);
    m_jerr.pub.error_exit = my_error_exit;
    m_jerr.pub.output_message = my_output_message;
    if (setjmp (m_jerr.setjmp_buffer)) {
        // Jump to here if there's a libjpeg internal error
        // Prevent memory leaks, see example.c in jpeg distribution
        jpeg_destroy_decompress (&m_cinfo);
        close_file ();
        return false;
    }

    jpeg_create_decompress (&m_cinfo);          // initialize decompressor
    jpeg_stdio_src (&m_cinfo, m_fd);            // specify the data source

    // Request saving of EXIF and other special tags for later spelunking
    for (int mark = 0;  mark < 16;  ++mark)
        jpeg_save_markers (&m_cinfo, JPEG_APP0+mark, 0xffff);
    jpeg_save_markers (&m_cinfo, JPEG_COM, 0xffff);     // comment marker

    // read the file parameters
    if (jpeg_read_header (&m_cinfo, FALSE) != JPEG_HEADER_OK || m_fatalerr) {
        error ("Bad JPEG header for \"%s\"", filename().c_str());
        return false;
    }

    int nchannels = m_cinfo.num_components;

    if (m_cinfo.jpeg_color_space == JCS_CMYK ||
        m_cinfo.jpeg_color_space == JCS_YCCK) {
        // CMYK jpegs get converted by us to RGB
        m_cinfo.out_color_space = JCS_CMYK;     // pre-convert YCbCrK->CMYK
        nchannels = 3;
        m_cmyk = true;
    }

    if (m_raw)
        m_coeffs = jpeg_read_coefficients (&m_cinfo);
    else
        jpeg_start_decompress (&m_cinfo);       // start working
    if (m_fatalerr)
        return false;
    m_next_scanline = 0;                        // next scanline we'll read

    m_spec = ImageSpec (m_cinfo.output_width, m_cinfo.output_height,
                        nchannels, TypeDesc::UINT8);

    // Assume JPEG is in sRGB unless the Exif or XMP tags say otherwise.
    m_spec.attribute ("oiio:ColorSpace", "sRGB");

    if (m_cinfo.jpeg_color_space == JCS_CMYK)
        m_spec.attribute ("jpeg:ColorSpace", "CMYK");
    else if (m_cinfo.jpeg_color_space == JCS_YCCK)
        m_spec.attribute ("jpeg:ColorSpace", "YCbCrK");

    // If the chroma subsampling is detected and matches something
    // we expect, then set an attribute so that it can be preserved
    // in future operations.
    std::string subsampling = comp_info_to_attr(m_cinfo);
    if (!subsampling.empty())
        m_spec.attribute(JPEG_SUBSAMPLING_ATTR, subsampling);
        
    for (jpeg_saved_marker_ptr m = m_cinfo.marker_list;  m;  m = m->next) {
        if (m->marker == (JPEG_APP0+1) &&
                ! strcmp ((const char *)m->data, "Exif")) {
            // The block starts with "Exif\0\0", so skip 6 bytes to get
            // to the start of the actual Exif data TIFF directory
            decode_exif ((unsigned char *)m->data+6, m->data_length-6, m_spec);
        }
        else if (m->marker == (JPEG_APP0+1) &&
                 ! strcmp ((const char *)m->data, "http://ns.adobe.com/xap/1.0/")) {
#ifndef NDEBUG
            std::cerr << "Found APP1 XMP! length " << m->data_length << "\n";
#endif
            std::string xml ((const char *)m->data, m->data_length);
            decode_xmp (xml, m_spec);
        }
        else if (m->marker == (JPEG_APP0+13) &&
                ! strcmp ((const char *)m->data, "Photoshop 3.0"))
            jpeg_decode_iptc ((unsigned char *)m->data);
        else if (m->marker == JPEG_COM) {
            if (! m_spec.find_attribute ("ImageDescription", TypeDesc::STRING))
                m_spec.attribute ("ImageDescription",
                                  std::string ((const char *)m->data, m->data_length));
        }
    }

    // Handle density/pixelaspect. We need to do this AFTER the exif is
    // decoded, in case it contains useful information.
    float xdensity = m_spec.get_float_attribute ("XResolution");
    float ydensity = m_spec.get_float_attribute ("YResolution");
    if (! xdensity || ! ydensity) {
        xdensity = float(m_cinfo.X_density);
        ydensity = float(m_cinfo.Y_density);
        if (xdensity && ydensity) {
            m_spec.attribute ("XResolution", xdensity);
            m_spec.attribute ("YResolution", ydensity);
        }
    }
    if (xdensity && ydensity) {
        float aspect = ydensity/xdensity;
        if (aspect != 1.0f)
            m_spec.attribute ("PixelAspectRatio", aspect);
        switch (m_cinfo.density_unit) {
        case 0 : m_spec.attribute ("ResolutionUnit", "none"); break;
        case 1 : m_spec.attribute ("ResolutionUnit", "in");   break;
        case 2 : m_spec.attribute ("ResolutionUnit", "cm");   break;
        }
    }

    read_icc_profile(&m_cinfo, m_spec);   // try to read the ICC profile

    newspec = m_spec;
    return true;
}
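The density handling at the end is easiest to see with concrete numbers (illustrative values, not taken from any particular file):

// X_density = 72, Y_density = 96, density_unit = 1 (dots per inch)
//   aspect = ydensity / xdensity = 96.0f / 72.0f ≈ 1.333f
//   => "PixelAspectRatio" = 1.333, "ResolutionUnit" = "in"
// Equal densities give aspect == 1.0f, so no PixelAspectRatio is written,
// and Exif XResolution/YResolution, when present, take precedence over the
// JFIF density fields because the Exif block is decoded first.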
Example #30
bool
RawInput::open (const std::string &name, ImageSpec &newspec,
                const ImageSpec &config)
{
    int ret;

    // open the image
    if ( (ret = m_processor.open_file(name.c_str()) ) != LIBRAW_SUCCESS) {
        error ("Could not open file \"%s\", %s", name.c_str(), libraw_strerror(ret));
        return false;
    }

    if ( (ret = m_processor.unpack() ) != LIBRAW_SUCCESS) {
        error ("Could not unpack \"%s\", %s",name.c_str(), libraw_strerror(ret));
        return false;
    }

    // Force LibRaw to adjust sizes based on the capture device orientation
    m_processor.adjust_sizes_info_only();

    // Set file information
    m_spec = ImageSpec(m_processor.imgdata.sizes.iwidth,
                       m_processor.imgdata.sizes.iheight,
                       3, // LibRaw should only give us 3 channels
                       TypeDesc::UINT16);

    // Output 16 bit images
    m_processor.imgdata.params.output_bps = 16;

    // Set the gamma curve to Linear
    m_spec.attribute("oiio:ColorSpace","Linear");
    m_processor.imgdata.params.gamm[0] = 1.0;
    m_processor.imgdata.params.gamm[1] = 1.0;

    // Check to see if the user has explicitly set the output colorspace primaries
    std::string cs = config.get_string_attribute ("raw:ColorSpace", "sRGB");
    if (cs.size()) {
        static const char *colorspaces[] = { "raw",
                                             "sRGB",
                                             "Adobe",
                                             "Wide",
                                             "ProPhoto",
                                             "XYZ", NULL
                                           };

        size_t c;
        // Stop at the NULL terminator so we never compare against a null pointer
        for (c = 0; colorspaces[c]; c++)
            if (cs == colorspaces[c])
                break;
        if (colorspaces[c])
            m_processor.imgdata.params.output_color = c;
        else {
            error("raw:ColorSpace set to unknown value");
            return false;
        }
        // Set the attribute in the output spec
        m_spec.attribute("raw:ColorSpace", cs);
    } else {
        // By default we use sRGB primaries for simplicity
        m_processor.imgdata.params.output_color = 1;
        m_spec.attribute("raw:ColorSpace", "sRGB");
    }

    // Exposure adjustment
    float exposure = config.get_float_attribute ("raw:Exposure", -1.0f);
    if (exposure >= 0.0f) {
        if (exposure < 0.25f || exposure > 8.0f) {
            error("raw:Exposure invalid value. range 0.25f - 8.0f");
            return false;
        }
        m_processor.imgdata.params.exp_correc = 1; // enable exposure correction
        m_processor.imgdata.params.exp_shift = exposure; // set exposure correction
        // Set the attribute in the output spec
        m_spec.attribute ("raw:Exposure", exposure);
    }

    // Interpolation quality
    // note: LibRaw must be compiled with demosaic pack GPL2 to use
    // demosaic algorithms 5-9. It must be compiled with demosaic pack GPL3 for
    // algorithm 10. If either of these packs is not included, it will silently use option 3 (AHD).
    std::string demosaic = config.get_string_attribute ("raw:Demosaic");
    if (demosaic.size()) {
        static const char *demosaic_algs[] = { "linear",
                                               "VNG",
                                               "PPG",
                                               "AHD",
                                               "DCB",
                                               "Modified AHD",
                                               "AFD",
                                               "VCD",
                                               "Mixed",
                                               "LMMSE",
                                               "AMaZE",
                                               // Future demosaicing algorithms should go here
                                               NULL
                                             };
        size_t d;
        // Stop at the NULL terminator so we never compare against a null pointer
        for (d = 0; demosaic_algs[d]; d++)
            if (demosaic == demosaic_algs[d])
                break;
        if (demosaic_algs[d])
            m_processor.imgdata.params.user_qual = d;
        else if (demosaic == "none") {
            // See if we can access the Bayer patterned data for this raw file
            libraw_decoder_info_t decoder_info;
            m_processor.get_decoder_info(&decoder_info);
            if (!(decoder_info.decoder_flags & LIBRAW_DECODER_FLATFIELD)) {
                error("Unable to extract unbayered data from file \"%s\"", name.c_str());
                return false;
            }

            // User has selected no demosaicing, so no processing needs to be done
            m_process = false;

            // The image width and height may be different now, so update with new values
            // Also we will only be reading back a single, bayered channel
            m_spec.width = m_processor.imgdata.sizes.raw_width;
            m_spec.height = m_processor.imgdata.sizes.raw_height;
            m_spec.nchannels = 1;
            m_spec.channelnames.clear();
            m_spec.channelnames.push_back("R");

            // Also, any previously set color-space and exposure options no
            // longer apply to the raw Bayer data, so remove them
            m_spec.erase_attribute("oiio:ColorSpace", TypeDesc::STRING);
            m_spec.erase_attribute("raw:ColorSpace", TypeDesc::STRING);
            m_spec.erase_attribute("raw:Exposure", TypeDesc::FLOAT);

        }
        else {
            error("raw:Demosaic set to unknown value");
            return false;
        }
        // Set the attribute in the output spec
        m_spec.attribute("raw:Demosaic", demosaic);
    } else {
        m_processor.imgdata.params.user_qual = 3;
        m_spec.attribute("raw:Demosaic", "AHD");
    }

    // Metadata

    const libraw_image_sizes_t &sizes (m_processor.imgdata.sizes);
    m_spec.attribute ("PixelAspectRatio", (float)sizes.pixel_aspect);
    // FIXME: sizes. top_margin, left_margin, raw_pitch, flip, mask?

    const libraw_iparams_t &idata (m_processor.imgdata.idata);
    if (idata.make[0])
        m_spec.attribute ("Make", idata.make);
    if (idata.model[0])
        m_spec.attribute ("Model", idata.model);
    // FIXME: idata. dng_version, is_foveon, colors, filters, cdesc

    const libraw_colordata_t &color (m_processor.imgdata.color);
    m_spec.attribute("Exif:Flash", (int) color.flash_used);
    if (color.model2[0])
        m_spec.attribute ("Software", color.model2);

    // FIXME -- all sorts of things in this struct

    const libraw_imgother_t &other (m_processor.imgdata.other);
    m_spec.attribute ("Exif:ISOSpeedRatings", (int) other.iso_speed);
    m_spec.attribute ("ExposureTime", other.shutter);
    m_spec.attribute ("Exif:ShutterSpeedValue", -log2f(other.shutter));
    m_spec.attribute ("FNumber", other.aperture);
    m_spec.attribute ("Exif:ApertureValue", 2.0f * log2f(other.aperture));
    m_spec.attribute ("Exif:FocalLength", other.focal_len);
    struct tm * m_tm = localtime(&m_processor.imgdata.other.timestamp);
    char datetime[20];
    strftime (datetime, 20, "%Y-%m-%d %H:%M:%S", m_tm);
    m_spec.attribute ("DateTime", datetime);
    // FIXME: other.shot_order
    // FIXME: other.gpsdata
    if (other.desc[0])
        m_spec.attribute ("ImageDescription", other.desc);
    if (other.artist[0])
        m_spec.attribute ("Artist", other.artist);

    // FIXME -- thumbnail possibly in m_processor.imgdata.thumbnail

    read_tiff_metadata (name);

    // Copy the spec to return to the user
    newspec = m_spec;
    return true;
}