Code example #1
void
BackendLLVM::llvm_assign_initial_value (const Symbol& sym)
{
    // Don't write over connections!  Connection values are written into
    // our layer when the earlier layer is run, as part of its code.  So
    // we just don't need to initialize it here at all.
    if (sym.valuesource() == Symbol::ConnectedVal &&
          !sym.typespec().is_closure_based())
        return;
    if (sym.typespec().is_closure_based() && sym.symtype() == SymTypeGlobal)
        return;

    int arraylen = std::max (1, sym.typespec().arraylength());

    // Closures need to get their storage before anything can be
    // assigned to them.  Unless they are params, in which case we took
    // care of it in the group entry point.
    if (sym.typespec().is_closure_based() &&
        sym.symtype() != SymTypeParam && sym.symtype() != SymTypeOutputParam) {
        llvm_assign_zero (sym);
        return;
    }

    if ((sym.symtype() == SymTypeLocal || sym.symtype() == SymTypeTemp)
          && shadingsys().debug_uninit()) {
        // Handle the "debug uninitialized values" case
        bool isarray = sym.typespec().is_array();
        int alen = isarray ? sym.typespec().arraylength() : 1;
        llvm::Value *u = NULL;
        if (sym.typespec().is_closure_based()) {
            // skip closures
        }
        else if (sym.typespec().is_floatbased())
            u = ll.constant (std::numeric_limits<float>::quiet_NaN());
        else if (sym.typespec().is_int_based())
            u = ll.constant (std::numeric_limits<int>::min());
        else if (sym.typespec().is_string_based())
            u = ll.constant (Strings::uninitialized_string);
        if (u) {
            for (int a = 0;  a < alen;  ++a) {
                llvm::Value *aval = isarray ? ll.constant(a) : NULL;
                for (int c = 0;  c < (int)sym.typespec().aggregate(); ++c)
                    llvm_store_value (u, sym, 0, aval, c);
            }
        }
        return;
    }

    if ((sym.symtype() == SymTypeLocal || sym.symtype() == SymTypeTemp) &&
        sym.typespec().is_string_based()) {
        // Strings are pointers.  Can't take any chance on leaving
        // local/tmp syms uninitialized.
        llvm_assign_zero (sym);
        return;  // we're done, the parts below are just for params
    }
    ASSERT_MSG (sym.symtype() == SymTypeParam || sym.symtype() == SymTypeOutputParam,
                "symtype was %d, data type was %s", (int)sym.symtype(), sym.typespec().c_str());

    if (sym.has_init_ops() && sym.valuesource() == Symbol::DefaultVal) {
        // Handle init ops.
        build_llvm_code (sym.initbegin(), sym.initend());
    } else if (! sym.lockgeom() && ! sym.typespec().is_closure()) {
        // geometrically-varying param; memcpy its default value
        TypeDesc t = sym.typespec().simpletype();
        ll.op_memcpy (llvm_void_ptr (sym), ll.constant_ptr (sym.data()),
                      t.size(), t.basesize() /*align*/);
        if (sym.has_derivs())
            llvm_zero_derivs (sym);
    } else {
        // Use default value
        int num_components = sym.typespec().simpletype().aggregate;
        TypeSpec elemtype = sym.typespec().elementtype();
        for (int a = 0, c = 0; a < arraylen;  ++a) {
            llvm::Value *arrind = sym.typespec().is_array() ? ll.constant(a) : NULL;
            if (sym.typespec().is_closure_based())
                continue;
            for (int i = 0; i < num_components; ++i, ++c) {
                // Fill in the constant val
                llvm::Value* init_val = 0;
                if (elemtype.is_floatbased())
                    init_val = ll.constant (((float*)sym.data())[c]);
                else if (elemtype.is_string())
                    init_val = ll.constant (((ustring*)sym.data())[c]);
                else if (elemtype.is_int())
                    init_val = ll.constant (((int*)sym.data())[c]);
                ASSERT (init_val);
                llvm_store_value (init_val, sym, 0, arrind, i);
            }
        }
        if (sym.has_derivs())
            llvm_zero_derivs (sym);
    }

    // Handle interpolated params.
    // FIXME -- really, we shouldn't assign defaults or run init ops if
    // the values are interpolated.  The perf hit is probably small, since
    // there are so few interpolated params, but we should come back and
    // fix this later.
    if ((sym.symtype() == SymTypeParam || sym.symtype() == SymTypeOutputParam)
        && ! sym.lockgeom()) {
        std::vector<llvm::Value*> args;
        args.push_back (sg_void_ptr());
        args.push_back (ll.constant (sym.name()));
        args.push_back (ll.constant (sym.typespec().simpletype()));
        args.push_back (ll.constant ((int) sym.has_derivs()));
        args.push_back (llvm_void_ptr (sym));
        ll.call_function ("osl_bind_interpolated_param",
                          &args[0], args.size());                            
    }
}
Code example #2
File: imageoutput.cpp  Project: Nvizible/oiio
const void *
ImageOutput::to_native_rectangle (int xbegin, int xend, int ybegin, int yend,
                                  int zbegin, int zend,
                                  TypeDesc format, const void *data,
                                  stride_t xstride, stride_t ystride, stride_t zstride,
                                  std::vector<unsigned char> &scratch)
{
    // native_pixel_bytes is the size of a pixel in the FILE, including
    // the per-channel format, if specified when the file was opened.
    stride_t native_pixel_bytes = (stride_t) m_spec.pixel_bytes (true);
    // perchanfile is true if the file has different per-channel formats
    bool perchanfile = m_spec.channelformats.size() && supports("channelformats");
    // It's an error to pass per-channel data formats to a writer that
    // doesn't support it.
    if (m_spec.channelformats.size() && !perchanfile)
        return NULL;
    // native_data is true if the user is passing data in the native format
    bool native_data = (format == TypeDesc::UNKNOWN ||
                        (format == m_spec.format && !perchanfile));
    // If the user is passing native data and they've left xstride set
    // to Auto, then we know it's the native pixel size.
    if (native_data && xstride == AutoStride)
        xstride = native_pixel_bytes;
    // Fill in the rest of the strides that haven't been set.
    m_spec.auto_stride (xstride, ystride, zstride, format,
                        m_spec.nchannels, xend-xbegin, yend-ybegin);

    // Compute width and height from the rectangle extents
    int width = xend - xbegin;
    int height = yend - ybegin;
    int depth = zend - zbegin;

    // Do the strides indicate that the data area is contiguous?
    bool contiguous = (xstride == (stride_t)m_spec.pixel_bytes(native_data));
    contiguous &= ((ystride == xstride*width || height == 1) &&
                   (zstride == ystride*height || depth == 1));

    if (native_data && contiguous) {
        // Data are already in the native format and contiguous
        // just return a ptr to the original data.
        return data;
    }

    imagesize_t rectangle_pixels = width * height * depth;
    imagesize_t rectangle_values = rectangle_pixels * m_spec.nchannels;
    imagesize_t rectangle_bytes = rectangle_pixels * native_pixel_bytes;

    // Cases to handle:
    // 1. File has per-channel data, user passes native data -- this has
    //    already returned above, since the data didn't need munging.
    // 2. File has per-channel data, user passes some other data type
    // 3. File has uniform data, user passes some other data type
    // 4. File has uniform data, user passes the right data -- note that
    //    this case already returned if the user data was contiguous

    // Handle the per-channel format case (#2) where the user is passing
    // a non-native buffer.
    if (perchanfile) {
        if (native_data) {
            ASSERT (contiguous && "Per-channel native output requires contiguous strides");
        }
        ASSERT (format != TypeDesc::UNKNOWN);
        ASSERT (m_spec.channelformats.size() == (size_t)m_spec.nchannels);
        scratch.resize (rectangle_bytes);
        size_t offset = 0;
        for (int c = 0;  c < m_spec.nchannels;  ++c) {
            TypeDesc chanformat = m_spec.channelformats[c];
            convert_image (1 /* channels */, width, height, depth,
                           (char *)data + c*format.size(), format,
                           xstride, ystride, zstride, 
                           &scratch[offset], chanformat,
                           native_pixel_bytes, AutoStride, AutoStride, NULL,
                           c == m_spec.alpha_channel ? 0 : -1,
                           c == m_spec.z_channel ? 0 : -1);
            offset += chanformat.size ();
        }
        return &scratch[0];
    }

    // The remaining code is where all channels in the file have the
    // same data type, which may or may not be what the user passed in
    // (cases #3 and #4 above).
    imagesize_t contiguoussize = contiguous ? 0 : rectangle_values * native_pixel_bytes;
    contiguoussize = (contiguoussize+3) & (~3); // Round up to 4-byte boundary
    DASSERT ((contiguoussize & 3) == 0);
    imagesize_t floatsize = rectangle_values * sizeof(float);
    scratch.resize (contiguoussize + floatsize + rectangle_bytes);

    // Force contiguity if not already present
    if (! contiguous) {
        data = contiguize (data, m_spec.nchannels, xstride, ystride, zstride,
                           (void *)&scratch[0], width, height, depth, format);
    }

    // Rather than implement the entire cross-product of possible
    // conversions, use float as an intermediate format, which generally
    // will always preserve enough precision.
    const float *buf;
    if (format == TypeDesc::FLOAT) {
        // Already in float format -- leave it as-is.
        buf = (float *)data;
    } else {
        // Convert from 'format' to float.
        buf = convert_to_float (data, (float *)&scratch[contiguoussize],
                                rectangle_values, format);
    }
    
    // Convert from float to native format.
    return convert_from_float (buf, &scratch[contiguoussize+floatsize], 
                       rectangle_values, m_spec.quant_black, m_spec.quant_white,
                       m_spec.quant_min, m_spec.quant_max,
                       m_spec.format);
}
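
The tail of to_native_rectangle converts through float as a single intermediate format rather than implementing every source-to-destination type pair. Below is a minimal, self-contained sketch of that idea using hypothetical helper names; it is not OIIO's convert_to_float/convert_from_float, just the same two-step pattern.

// Sketch: convert 8-bit pixel values to 16-bit by passing through float
// as the one intermediate representation (hypothetical helpers, not OIIO's).
#include <algorithm>
#include <cstdint>
#include <vector>

static float u8_to_float (uint8_t v) { return v / 255.0f; }

static uint16_t float_to_u16 (float f)
{
    f = std::min (std::max (f, 0.0f), 1.0f);      // clamp to [0,1]
    return (uint16_t) (f * 65535.0f + 0.5f);      // scale and round
}

std::vector<uint16_t> convert_u8_to_u16 (const std::vector<uint8_t> &src)
{
    std::vector<uint16_t> dst (src.size());
    for (size_t i = 0; i < src.size(); ++i)
        dst[i] = float_to_u16 (u8_to_float (src[i]));  // u8 -> float -> u16
    return dst;
}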
Code example #3
File: rlainput.cpp  Project: DingTo/oiio
bool
RLAInput::seek_subimage (int subimage, int miplevel, ImageSpec &newspec)
{
    if (miplevel != 0 || subimage < 0)
        return false;

    if (subimage == current_subimage())
        return true;    // already on the right level

    // RLA images allow multiple subimages; they are simply concatenated
    // together, with image N's header field NextOffset giving the
    // absolute offset of the start of image N+1.
    int diff = subimage - current_subimage ();
    if (subimage - current_subimage () < 0) {
        // If we are requesting an image earlier than the current one,
        // reset to the first subimage.
        fseek (m_file, 0, SEEK_SET);
        if (!read_header ())
            return false;  // read_header always calls error()
        diff = subimage;
    }
    // forward scrolling -- skip subimages until we're at the right place
    while (diff > 0 && m_rla.NextOffset != 0) {
        fseek (m_file, m_rla.NextOffset, SEEK_SET);
        if (!read_header ())
            return false;  // read_header always calls error()
        --diff;
    }
    if (diff > 0 && m_rla.NextOffset == 0) {  // no more subimages to read
        error ("Unknown subimage");
        return false;
    }

    // Now m_rla holds the header of the requested subimage.  Examine it
    // to fill out our ImageSpec.

    if (m_rla.ColorChannelType > CT_FLOAT) {
        error ("Illegal color channel type: %d", m_rla.ColorChannelType);
        return false;
    }
    if (m_rla.MatteChannelType > CT_FLOAT) {
        error ("Illegal matte channel type: %d", m_rla.MatteChannelType);
        return false;
    }
    if (m_rla.AuxChannelType > CT_FLOAT) {
        error ("Illegal auxiliary channel type: %d", m_rla.AuxChannelType);
        return false;
    }

    // pick maximum precision for the time being
    int maxbytes = (std::max (m_rla.NumOfChannelBits * (m_rla.NumOfColorChannels > 0 ? 1 : 0),
                             std::max (m_rla.NumOfMatteBits * (m_rla.NumOfMatteChannels > 0 ? 1 : 0),
                                       m_rla.NumOfAuxBits * (m_rla.NumOfAuxChannels > 0 ? 1 : 0)))
                   + 7) / 8;
    int nchannels = m_rla.NumOfColorChannels + m_rla.NumOfMatteChannels
                                             + m_rla.NumOfAuxChannels;
    TypeDesc maxtype = (maxbytes == 4) ? TypeDesc::UINT32
                     : (maxbytes == 2 ? TypeDesc::UINT16 : TypeDesc::UINT8);
    if (nchannels < 1 || nchannels > 16 ||
        (maxbytes != 1 && maxbytes != 2 && maxbytes != 4)) {
        error ("Failed channel bytes sanity check");
        return false;   // failed sanity check
    }

    m_spec = ImageSpec (m_rla.ActiveRight - m_rla.ActiveLeft + 1,
                        (m_rla.ActiveTop - m_rla.ActiveBottom + 1)
                            / (m_rla.FieldRendered ? 2 : 1), // interlaced image?
                        m_rla.NumOfColorChannels
                        + m_rla.NumOfMatteChannels
                        + m_rla.NumOfAuxChannels, maxtype);
    
    // set window dimensions etc.
    m_spec.x = m_rla.ActiveLeft;
    m_spec.y = m_spec.height - m_rla.ActiveTop - 1;
    m_spec.full_width = m_rla.WindowRight - m_rla.WindowLeft + 1;
    m_spec.full_height = m_rla.WindowTop - m_rla.WindowBottom + 1;
    m_spec.full_depth = 1;
    m_spec.full_x = m_rla.WindowLeft;
    m_spec.full_y = m_spec.full_height - m_rla.WindowTop - 1;

    // set channel formats and stride
    m_stride = 0;
    TypeDesc t = get_channel_typedesc (m_rla.ColorChannelType, m_rla.NumOfChannelBits);
    for (int i = 0; i < m_rla.NumOfColorChannels; ++i)
        m_spec.channelformats.push_back (t);
    m_stride += m_rla.NumOfColorChannels * t.size ();
    t = get_channel_typedesc (m_rla.MatteChannelType, m_rla.NumOfMatteBits);
    for (int i = 0; i < m_rla.NumOfMatteChannels; ++i)
        m_spec.channelformats.push_back (t);
    m_stride += m_rla.NumOfMatteChannels * t.size ();
    t = get_channel_typedesc (m_rla.AuxChannelType, m_rla.NumOfAuxBits);
    for (int i = 0; i < m_rla.NumOfAuxChannels; ++i)
        m_spec.channelformats.push_back (t);
    m_stride += m_rla.NumOfAuxChannels * t.size ();

    // But if all channels turned out the same, just use 'format' and don't
    // bother sending back channelformats at all.
    bool allsame = true;
    for (int c = 1;  c < m_spec.nchannels;  ++c)
        allsame &= (m_spec.channelformats[c] == m_spec.channelformats[0]);
    if (allsame) {
        m_spec.format = m_spec.channelformats[0];
        m_spec.channelformats.clear();
        m_spec.attribute ("oiio:BitsPerSample", m_rla.NumOfChannelBits);
        // N.B. don't set bps for mixed formats, it isn't well defined
    }

    // make a guess at channel names for the time being
    m_spec.default_channel_names ();
    // this is always true
    m_spec.attribute ("compression", "rle");
    
    if (m_rla.DateCreated[0]) {
        char month[4] = {0, 0, 0, 0};
        int d, h, M, m, y;
        if (sscanf (m_rla.DateCreated, "%c%c%c %d %d:%d %d",
            month + 0, month + 1, month + 2, &d, &h, &m, &y) == 7) {
            M = get_month_number (month);
            if (M > 0) {
                // construct a date/time marker in OIIO convention
                m_spec.attribute ("DateTime", 
                                  Strutil::format("%4d:%02d:%02d %02d:%02d:00", y, M, d, h, m));
            }
        }
    }
    
    // save some typing by using macros
#define FIELD(x,name)             if (m_rla.x > 0)              \
                                            m_spec.attribute (name, m_rla.x)
#define STRING_FIELD(x,name)      if (m_rla.x[0])               \
                                            m_spec.attribute (name, m_rla.x)
    STRING_FIELD (Description, "ImageDescription");
    FIELD (FrameNumber, "rla:FrameNumber");
    FIELD (Revision, "rla:Revision");
    FIELD (JobNumber, "rla:JobNumber");
    FIELD (FieldRendered, "rla:FieldRendered");
    STRING_FIELD (FileName, "rla:FileName");
    STRING_FIELD (ProgramName, "Software");
    STRING_FIELD (MachineName, "HostComputer");
    STRING_FIELD (UserName, "Artist");
    STRING_FIELD (Aspect, "rla:Aspect");
    STRING_FIELD (ColorChannel, "rla:ColorChannel");
    STRING_FIELD (Time, "rla:Time");
    STRING_FIELD (Filter, "rla:Filter");
    STRING_FIELD (AuxData, "rla:AuxData");
#undef STRING_FIELD
#undef FIELD

    float f[3]; // variable will be reused for chroma, thus the array
    f[0] = atof (m_rla.Gamma);
    if (f[0] > 0.f) {
        if (f[0] == 1.f)
            m_spec.attribute ("oiio:ColorSpace", "Linear");
        else {
            m_spec.attribute ("oiio:ColorSpace", "GammaCorrected");
            m_spec.attribute ("oiio:Gamma", f[0]);
        }
    }
    
    f[0] = atof (m_rla.AspectRatio);
    if (f[0] > 0.f)
        m_spec.attribute ("PixelAspectRatio", f[0]);
    
    // read chromaticity points
    if (m_rla.RedChroma[0]) {
        int num = sscanf(m_rla.RedChroma, "%f %f %f", f + 0, f + 1, f + 2);
        if (num >= 2)
            m_spec.attribute ("rla:RedChroma", TypeDesc(TypeDesc::FLOAT,
                              num == 2 ? TypeDesc::VEC2 : TypeDesc::VEC3,
                              TypeDesc::POINT), f);
    }
    if (m_rla.GreenChroma[0]) {
        int num = sscanf(m_rla.GreenChroma, "%f %f %f", f + 0, f + 1, f + 2);
        if (num >= 2)
            m_spec.attribute ("rla:GreenChroma", TypeDesc(TypeDesc::FLOAT,
                              num == 2 ? TypeDesc::VEC2 : TypeDesc::VEC3,
                              TypeDesc::POINT), f);
    }
    if (m_rla.BlueChroma[0]) {
        int num = sscanf(m_rla.BlueChroma, "%f %f %f", f + 0, f + 1, f + 2);
        if (num >= 2)
            m_spec.attribute ("rla:BlueChroma", TypeDesc(TypeDesc::FLOAT,
                              num == 2 ? TypeDesc::VEC2 : TypeDesc::VEC3,
                              TypeDesc::POINT), f);
    }
    if (m_rla.WhitePoint[0]) {
        int num = sscanf(m_rla.WhitePoint, "%f %f %f", f + 0, f + 1, f + 2);
        if (num >= 2)
            m_spec.attribute ("rla:WhitePoint", TypeDesc(TypeDesc::FLOAT,
                              num == 2 ? TypeDesc::VEC2 : TypeDesc::VEC3,
                              TypeDesc::POINT), f);
    }

    newspec = spec ();    
    m_subimage = subimage;
    
    // N.B. the file pointer is now immediately after the scanline
    // offset table for this subimage.
    return true;
}
Code example #4
File: rlainput.cpp  Project: DingTo/oiio
bool
RLAInput::decode_channel_group (int first_channel, short num_channels,
                                short num_bits, int y)
{
    // Some preliminaries -- figure out various sizes and offsets
    int chsize;         // size of the channels in this group, in bytes
    int offset;         // buffer offset to first channel
    int pixelsize;      // spacing between pixels (in bytes) in the output
    TypeDesc chantype;  // data type for the channel
    if (! m_spec.channelformats.size()) {
        // No per-channel formats, they are all the same, so it's easy
        chantype = m_spec.format;
        chsize = chantype.size ();
        offset = first_channel * chsize;
        pixelsize = chsize * m_spec.nchannels;
    } else {
        // Per-channel formats differ, need to sum them up
        chantype = m_spec.channelformats[first_channel];
        chsize = chantype.size ();
        offset = 0;
        pixelsize = m_spec.pixel_bytes (true);
        for (int i = 0; i < first_channel; ++i)
            offset += m_spec.channelformats[i].size ();
    }

    // Read the big-endian values into the buffer.
    // The channels are simply concatenated together in order.
    // Each channel starts with a length, from which we know how many
    // bytes of encoded RLE data to read.  Then there are RLE
    // spans for each 8-bit slice of the channel.
    std::vector<char> encoded;
    for (int c = 0;  c < num_channels;  ++c) {
        // Read the length
        uint16_t length; // number of encoded bytes
        if (!read (&length)) {
            error ("Read error: couldn't read RLE record length");
            return false;
        }
        // Read the encoded RLE record
        encoded.resize (length);
        if (!read (&encoded[0], length)) {
            error ("Read error: couldn't read RLE data span");
            return false;
        }

        if (chantype == TypeDesc::FLOAT) {
            // Special case -- float data is just dumped raw, no RLE
            for (int x = 0;  x < m_spec.width;  ++x)
                *((float *)&m_buf[offset+c*chsize+x*pixelsize]) =
                    ((float *)&encoded[0])[x];
            continue;
        }

        // Decode RLE -- one pass for each significant byte of the file,
        // which we re-interleave properly by passing the right offsets
        // and strides to decode_rle_span.
        size_t eoffset = 0;
        for (int bytes = 0;  bytes < chsize;  ++bytes) {
            size_t e = decode_rle_span (&m_buf[offset+c*chsize+bytes],
                                        m_spec.width, pixelsize,
                                        &encoded[eoffset], length);
            if (! e)
                return false;
            eoffset += e;
        }
    }

    // If we're little endian, swap endianness in place for 2- and
    // 4-byte pixel data.
    if (littleendian()) {
        if (chsize == 2) {
            if (num_channels == m_spec.nchannels)
                swap_endian ((uint16_t *)&m_buf[0], num_channels*m_spec.width);
            else
                for (int x = 0;  x < m_spec.width;  ++x)
                    swap_endian ((uint16_t *)&m_buf[offset+x*pixelsize], num_channels);
        } else if (chsize == 4 && chantype != TypeDesc::FLOAT) {
            if (num_channels == m_spec.nchannels)
                swap_endian ((uint32_t *)&m_buf[0], num_channels*m_spec.width);
            else
                for (int x = 0;  x < m_spec.width;  ++x)
                    swap_endian ((uint32_t *)&m_buf[offset+x*pixelsize], num_channels);
        }
    }

    // If not 8*2^n bits, need to rescale.  For example, if num_bits is
    // 10, the data values run 0-1023, but are stored in uint16.  So we
    // now rescale to the full range of the output buffer range, per
    // OIIO conventions.
    if (num_bits == 8 || num_bits == 16 || num_bits == 32) {
        // ok -- no rescaling needed
    } else if (num_bits == 10) {
        // fast, common case -- use templated hard-code
        for (int x = 0;  x < m_spec.width;  ++x) {
            uint16_t *b = (uint16_t *)(&m_buf[offset+x*pixelsize]);
            for (int c = 0;  c < num_channels;  ++c)
                b[c] = bit_range_convert<10,16> (b[c]);
        }
    } else if (num_bits < 8) {
        // rare case, use slow code to make this clause short and simple
        for (int x = 0;  x < m_spec.width;  ++x) {
            uint8_t *b = (uint8_t *)&m_buf[offset+x*pixelsize];
            for (int c = 0;  c < num_channels;  ++c)
                b[c] = bit_range_convert (b[c], num_bits, 8);
        }
    } else if (num_bits > 8 && num_bits < 16) {
        // rare case, use slow code to make this clause short and simple
        for (int x = 0;  x < m_spec.width;  ++x) {
            uint16_t *b = (uint16_t *)&m_buf[offset+x*pixelsize];
            for (int c = 0;  c < num_channels;  ++c)
                b[c] = bit_range_convert (b[c], num_bits, 16);
        }
    } else if (num_bits > 16 && num_bits < 32) {
        // rare case, use slow code to make this clause short and simple
        for (int x = 0;  x < m_spec.width;  ++x) {
            uint32_t *b = (uint32_t *)&m_buf[offset+x*pixelsize];
            for (int c = 0;  c < num_channels;  ++c)
                b[c] = bit_range_convert (b[c], num_bits, 32);
        }
    }
    return true;
}
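
The rescaling at the end of decode_channel_group maps values stored with an odd bit depth (say, 10 bits inside a uint16) onto the full range of the container type. The function below only illustrates that arithmetic; it is not the bit_range_convert OIIO actually uses.

// Sketch: rescale an unsigned value from 'frombits' of precision to 'tobits',
// mapping 0..(2^frombits - 1) onto 0..(2^tobits - 1) with rounding.
#include <cstdint>

uint32_t rescale_bits (uint32_t val, int frombits, int tobits)
{
    uint64_t frommax = (1ull << frombits) - 1;   // e.g. 1023 for 10 bits
    uint64_t tomax   = (1ull << tobits) - 1;     // e.g. 65535 for 16 bits
    return (uint32_t) ((val * tomax + frommax / 2) / frommax);
}
// With frombits=10 and tobits=16, 0 maps to 0 and 1023 maps to 65535.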
Code example #5
static void
dump_data (ImageInput *input)
{
    const ImageSpec &spec (input->spec());
    if (spec.deep) {
        // Special handling of deep data
        DeepData dd;
        if (! input->read_native_deep_image (dd)) {
            printf ("    dump data: could not read image\n");
            return;
        }
        int nc = spec.nchannels;
        TypeDesc *types = &dd.channeltypes[0];
        for (int z = 0, pixel = 0;  z < spec.depth;  ++z) {
            for (int y = 0;  y < spec.height;  ++y) {
                for (int x = 0;  x < spec.width;  ++x, ++pixel) {
                    int nsamples = dd.nsamples[pixel];
                    std::cout << "    Pixel (";
                    if (spec.depth > 1 || spec.z != 0)
                        std::cout << Strutil::format("%d, %d, %d",
                                                     x+spec.x, y+spec.y, z+spec.z);
                    else
                        std::cout << Strutil::format("%d, %d",
                                                     x+spec.x, y+spec.y);
                    std::cout << "): " << nsamples << " samples"
                              << (nsamples ? ":" : "");
                    for (int s = 0;  s < nsamples;  ++s) {
                        if (s)
                            std::cout << " / ";
                        for (int c = 0;  c < nc;  ++c) {
                            std::cout << " " << spec.channelnames[c] << "=";
                            const char *ptr = (const char *)dd.pointers[pixel*nc+c];
                            TypeDesc t = types[c];
                            ptr += s * t.size();
                            if (t.basetype == TypeDesc::FLOAT) {
                                std::cout << *(const float *)ptr;
                            } else if (t.basetype == TypeDesc::HALF) {
                                std::cout << *(const half *)ptr;
                            } else if (t.basetype == TypeDesc::UINT) {
                                std::cout << *(const unsigned int *)ptr;
                            }
                        }
                    }
                    std::cout << "\n";
                }
            }
        }

    } else {
        std::vector<float> buf(spec.image_pixels() * spec.nchannels);
        if (! input->read_image (TypeDesc::UNKNOWN /*native*/, &buf[0])) {
            printf ("    dump data: could not read image\n");
            return;
        }
        const float *ptr = &buf[0];
        for (int z = 0;  z < spec.depth;  ++z) {
            for (int y = 0;  y < spec.height;  ++y) {
                for (int x = 0;  x < spec.width;  ++x) {
                    if (spec.depth > 1 || spec.z != 0)
                        std::cout << Strutil::format("    Pixel (%d, %d, %d):",
                                                     x+spec.x, y+spec.y, z+spec.z);
                    else
                        std::cout << Strutil::format("    Pixel (%d, %d):",
                                                     x+spec.x, y+spec.y);
                    for (int c = 0;  c < spec.nchannels;  ++c, ++ptr) {
                        std::cout << ' ' << (*ptr);
                    }
                    std::cout << "\n";
                }
            }
        }
    }
}
Code example #6
llvm::Type *
BackendLLVM::llvm_type_groupdata ()
{
    // If already computed, return it
    if (m_llvm_type_groupdata)
        return m_llvm_type_groupdata;

    std::vector<llvm::Type*> fields;
    int offset = 0;
    int order = 0;

    if (llvm_debug() >= 2)
        std::cout << "Group param struct:\n";

    // First, add the array that tells if each layer has run.  But only make
    // slots for the layers that may be called/used.
    if (llvm_debug() >= 2)
        std::cout << "  layers run flags: " << m_num_used_layers
                  << " at offset " << offset << "\n";
    int sz = (m_num_used_layers + 3) & (~3);  // Round up to 32 bit boundary
    fields.push_back (ll.type_array (ll.type_bool(), sz));
    offset += sz * sizeof(bool);
    ++order;

    // Now add the array that tells which userdata have been initialized,
    // and the space for the userdata values.
    int nuserdata = (int) group().m_userdata_names.size();
    if (nuserdata) {
        if (llvm_debug() >= 2)
            std::cout << "  userdata initialized flags: " << nuserdata
                      << " at offset " << offset << ", field " << order << "\n";
        ustring *names = & group().m_userdata_names[0];
        TypeDesc *types = & group().m_userdata_types[0];
        int *offsets = & group().m_userdata_offsets[0];
        int sz = (nuserdata + 3) & (~3);
        fields.push_back (ll.type_array (ll.type_bool(), sz));
        offset += nuserdata * sizeof(bool);
        ++order;
        for (int i = 0; i < nuserdata; ++i) {
            TypeDesc type = types[i];
            int n = type.numelements() * 3;   // always make deriv room
            type.arraylen = n;
            fields.push_back (llvm_type (type));
            // Alignment
            int align = type.basesize();
            offset = OIIO::round_to_multiple_of_pow2 (offset, align);
            if (llvm_debug() >= 2)
                std::cout << "  userdata " << names[i] << ' ' << type
                          << ", field " << order << ", offset " << offset << "\n";
            offsets[i] = offset;
            offset += int(type.size());
            ++order;
        }
    }

    // For each layer in the group, add entries for all params that are
    // connected or interpolated, and output params.  Also mark those
    // symbols with their offset within the group struct.
    m_param_order_map.clear ();
    for (int layer = 0;  layer < group().nlayers();  ++layer) {
        ShaderInstance *inst = group()[layer];
        if (inst->unused())
            continue;
        FOREACH_PARAM (Symbol &sym, inst) {
            TypeSpec ts = sym.typespec();
            if (ts.is_structure())  // skip the struct symbol itself
                continue;
            const int arraylen = std::max (1, sym.typespec().arraylength());
            const int derivSize = (sym.has_derivs() ? 3 : 1);
            ts.make_array (arraylen * derivSize);
            fields.push_back (llvm_type (ts));

            // Alignment
            size_t align = sym.typespec().is_closure_based() ? sizeof(void*) :
                    sym.typespec().simpletype().basesize();
            if (offset & (align-1))
                offset += align - (offset & (align-1));
            if (llvm_debug() >= 2)
                std::cout << "  " << inst->layername() 
                          << " (" << inst->id() << ") " << sym.mangled()
                          << " " << ts.c_str() << ", field " << order 
                          << ", size " << derivSize * int(sym.size())
                          << ", offset " << offset << std::endl;
            sym.dataoffset ((int)offset);
            offset += derivSize* int(sym.size());

            m_param_order_map[&sym] = order;
            ++order;
        }
    }
Code example #7
File: imageinput.cpp  Project: angeljimenez/oiio
bool
ImageInput::read_image (TypeDesc format, void *data,
                        stride_t xstride, stride_t ystride, stride_t zstride,
                        ProgressCallback progress_callback,
                        void *progress_callback_data)
{
    bool native = (format == TypeDesc::UNKNOWN);
    stride_t pixel_bytes = native ? (stride_t) m_spec.pixel_bytes (native)
                                  : (stride_t) (format.size()*m_spec.nchannels);
    if (native && xstride == AutoStride)
        xstride = pixel_bytes;
    m_spec.auto_stride (xstride, ystride, zstride, format, m_spec.nchannels,
                        m_spec.width, m_spec.height);
    bool ok = true;
    if (progress_callback)
        if (progress_callback (progress_callback_data, 0.0f))
            return ok;
    if (m_spec.tile_width) {
        // Tiled image

        // Locally allocate a single tile to gracefully deal with image
        // dimensions smaller than a tile, or if one of the tiles runs
        // past the right or bottom edge.  Then we copy from our tile to
        // the user data, only copying valid pixel ranges.
        stride_t tilexstride = pixel_bytes;
        stride_t tileystride = tilexstride * m_spec.tile_width;
        stride_t tilezstride = tileystride * m_spec.tile_height;
        imagesize_t tile_pixels = m_spec.tile_pixels();
        std::vector<char> pels (tile_pixels * pixel_bytes);
        for (int z = 0;  z < m_spec.depth;  z += m_spec.tile_depth)
            for (int y = 0;  y < m_spec.height;  y += m_spec.tile_height) {
                for (int x = 0;  x < m_spec.width && ok;  x += m_spec.tile_width) {
                    ok &= read_tile (x+m_spec.x, y+m_spec.y, z+m_spec.z,
                                     format, &pels[0]);
                    // Now copy out the scanlines
                    int ntz = std::min (z+m_spec.tile_depth, m_spec.depth) - z;
                    int nty = std::min (y+m_spec.tile_height, m_spec.height) - y;
                    int ntx = std::min (x+m_spec.tile_width, m_spec.width) - x;
                    for (int tz = 0;  tz < ntz;  ++tz) {
                        for (int ty = 0;  ty < nty;  ++ty) {
                            // FIXME -- doesn't work for non-contiguous scanlines
                            memcpy ((char *)data + x*xstride + (y+ty)*ystride + (z+tz)*zstride,
                                    &pels[ty*tileystride+tz*tilezstride],
                                    ntx*tilexstride);
                        }
                    }
//                    return ok; // DEBUG -- just try very first tile
                }
                if (progress_callback)
                    if (progress_callback (progress_callback_data, (float)y/m_spec.height))
                        return ok;
            }
    } else {
        // Scanline image
        for (int z = 0;  z < m_spec.depth;  ++z)
            for (int y = 0;  y < m_spec.height && ok;  ++y) {
                ok &= read_scanline (y+m_spec.y, z+m_spec.z, format,
                                     (char *)data + z*zstride + y*ystride,
                                     xstride);
                if (progress_callback && !(y & 0x0f))
                    if (progress_callback (progress_callback_data, (float)y/m_spec.height))
                        return ok;
            }
    }
    if (progress_callback)
        progress_callback (progress_callback_data, 1.0f);
    return ok;
}
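
For context, a typical caller of this read_image convenience method, using the same older pointer-based ImageInput API as the examples above, might look like the sketch below. "input.exr" is a placeholder filename, and ownership/cleanup conventions vary between OIIO versions.

// Sketch: read a whole image into a float buffer.
#include <OpenImageIO/imageio.h>
#include <iostream>
#include <vector>
using namespace OIIO;

int main ()
{
    ImageInput *in = ImageInput::open ("input.exr");
    if (! in)
        return 1;
    const ImageSpec &spec = in->spec ();
    std::vector<float> pixels ((size_t)spec.width * spec.height * spec.nchannels);
    if (! in->read_image (TypeDesc::FLOAT, &pixels[0]))
        std::cerr << "read_image failed: " << in->geterror () << "\n";
    in->close ();
    delete in;                  // older API: the caller owns the ImageInput
    return 0;
}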
Code example #8
File: imagebufalgo_opencv.cpp  Project: AheadIO/oiio
OIIO_NAMESPACE_BEGIN



bool
ImageBufAlgo::from_IplImage (ImageBuf &dst, const IplImage *ipl,
                             TypeDesc convert)
{
    if (! ipl) {
        DASSERT (0 && "ImageBufAlgo::fromIplImage called with NULL ipl");
        dst.error ("Passed NULL source IplImage");
        return false;
    }
#ifdef USE_OPENCV
    TypeDesc srcformat;
    switch (ipl->depth) {
    case int(IPL_DEPTH_8U) :
        srcformat = TypeDesc::UINT8;  break;
    case int(IPL_DEPTH_8S) :
        srcformat = TypeDesc::INT8;  break;
    case int(IPL_DEPTH_16U) :
        srcformat = TypeDesc::UINT16;  break;
    case int(IPL_DEPTH_16S) :
        srcformat = TypeDesc::INT16;  break;
    case int(IPL_DEPTH_32F) :
        srcformat = TypeDesc::FLOAT;  break;
    case int(IPL_DEPTH_64F) :
        srcformat = TypeDesc::DOUBLE;  break;
    default:
        DASSERT (0 && "unknown IplImage type");
        dst.error ("Unsupported IplImage depth %d", (int)ipl->depth);
        return false;
    }

    TypeDesc dstformat = (convert != TypeDesc::UNKNOWN) ? convert : srcformat;
    ImageSpec spec (ipl->width, ipl->height, ipl->nChannels, dstformat);
    // N.B. The OpenCV headers say that ipl->alphaChannel,
    // ipl->colorModel, and ipl->channelSeq are ignored by OpenCV.

    if (ipl->dataOrder != IPL_DATA_ORDER_PIXEL) {
        // We don't handle separate color channels, and OpenCV doesn't either
        dst.error ("Unsupported IplImage data order %d", (int)ipl->dataOrder);
        return false;
    }

    dst.reset (dst.name(), spec);
    size_t pixelsize = srcformat.size()*spec.nchannels;
    // Account for the origin in the line step size, to end up with the
    // standard OIIO origin-at-upper-left:
    size_t linestep = ipl->origin ? -ipl->widthStep : ipl->widthStep;
    // Block copy and convert
    convert_image (spec.nchannels, spec.width, spec.height, 1,
                   ipl->imageData, srcformat,
                   pixelsize, linestep, 0,
                   dst.pixeladdr(0,0), dstformat,
                   spec.pixel_bytes(), spec.scanline_bytes(), 0);
    // FIXME - honor dataOrder.  I'm not sure if it is ever used by
    // OpenCV.  Fix when it becomes a problem.

    // OpenCV uses BGR ordering
    // FIXME: what do they do with alpha?
    if (spec.nchannels >= 3) {
        float pixel[4];
        for (int y = 0;  y < spec.height;  ++y) {
            for (int x = 0;  x < spec.width;  ++x) {
                dst.getpixel (x, y, pixel, 4);
                float tmp = pixel[0];  pixel[0] = pixel[2]; pixel[2] = tmp;
                dst.setpixel (x, y, pixel, 4);
            }
        }
    }
    // FIXME -- the copy and channel swap should happen all as one loop,
    // probably templated by type.

    return true;
#else
    dst.error ("fromIplImage not supported -- no OpenCV support at compile time");
    return false;
#endif
}
Code example #9
bool 
ImageInput::read_tile (int x, int y, int z, TypeDesc format, void *data,
                       stride_t xstride, stride_t ystride, stride_t zstride)
{
    if (! m_spec.tile_width ||
        ((x-m_spec.x) % m_spec.tile_width) != 0 ||
        ((y-m_spec.y) % m_spec.tile_height) != 0 ||
        ((z-m_spec.z) % m_spec.tile_depth) != 0)
        return false;   // coordinates are not a tile corner

    // native_pixel_bytes is the size of a pixel in the FILE, including
    // the per-channel format.
    stride_t native_pixel_bytes = (stride_t) m_spec.pixel_bytes (true);
    // perchanfile is true if the file has different per-channel formats
    bool perchanfile = m_spec.channelformats.size();
    // native_data is true if the user is asking for data in the native format
    bool native_data = (format == TypeDesc::UNKNOWN ||
                        (format == m_spec.format && !perchanfile));
    if (format == TypeDesc::UNKNOWN && xstride == AutoStride)
        xstride = native_pixel_bytes;
    m_spec.auto_stride (xstride, ystride, zstride, format, m_spec.nchannels,
                        m_spec.tile_width, m_spec.tile_height);
    // Do the strides indicate that the data area is contiguous?
    bool contiguous = (native_data && xstride == native_pixel_bytes) ||
        (!native_data && xstride == (stride_t)m_spec.pixel_bytes(false));
    contiguous &= (ystride == xstride*m_spec.tile_width &&
                   (zstride == ystride*m_spec.tile_height || zstride == 0));

    // If user's format and strides are set up to accept the native data
    // layout, read the tile directly into the user's buffer.
    if (native_data && contiguous)
        return read_native_tile (x, y, z, data);  // Simple case

    // Complex case -- either changing data type or stride
    size_t tile_values = (size_t)m_spec.tile_pixels() * m_spec.nchannels;

    std::vector<char> buf (m_spec.tile_bytes(true));
    bool ok = read_native_tile (x, y, z, &buf[0]);
    if (! ok)
        return false;
    if (! perchanfile) {
        // No per-channel formats -- do the conversion in one shot
        ok = contiguous 
            ? convert_types (m_spec.format, &buf[0], format, data, tile_values)
            : convert_image (m_spec.nchannels, m_spec.tile_width, m_spec.tile_height, m_spec.tile_depth, 
                             &buf[0], m_spec.format, AutoStride, AutoStride, AutoStride,
                             data, format, xstride, ystride, zstride);
    } else {
        // Per-channel formats -- have to convert/copy channels individually
        if (native_data) {
            ASSERT (contiguous && "Per-channel native input requires contiguous strides");
        }
        ASSERT (format != TypeDesc::UNKNOWN);
        ASSERT (m_spec.channelformats.size() == (size_t)m_spec.nchannels);
        size_t offset = 0;
        for (int c = 0;  c < m_spec.nchannels;  ++c) {
            TypeDesc chanformat = m_spec.channelformats[c];
            ok = convert_image (1 /* channels */, m_spec.tile_width,
                                m_spec.tile_height, m_spec.tile_depth,
                                &buf[offset], chanformat, 
                                native_pixel_bytes, AutoStride, AutoStride,
                                (char *)data + c*format.size(),
                                format, xstride, AutoStride, AutoStride);
            offset += chanformat.size ();
        }
    }

    if (! ok)
        error ("ImageInput::read_tile : no support for format %s",
               m_spec.format.c_str());
    return ok;
}
Code example #10
bool 
ImageInput::read_tiles (int xbegin, int xend, int ybegin, int yend,
                        int zbegin, int zend, 
                        int firstchan, int nchans,
                        TypeDesc format, void *data,
                        stride_t xstride, stride_t ystride, stride_t zstride)
{
    if (! m_spec.valid_tile_range (xbegin, xend, ybegin, yend, zbegin, zend))
        return false;

    nchans = std::min (nchans, m_spec.nchannels-firstchan);
    // native_pixel_bytes is the size of a pixel in the FILE, including
    // the per-channel format.
    stride_t native_pixel_bytes = (stride_t) m_spec.pixel_bytes (firstchan, nchans, true);
    // perchanfile is true if the file has different per-channel formats
    bool perchanfile = m_spec.channelformats.size();
    // native_data is true if the user is asking for data in the native format
    bool native_data = (format == TypeDesc::UNKNOWN ||
                        (format == m_spec.format && !perchanfile));
    if (format == TypeDesc::UNKNOWN && xstride == AutoStride)
        xstride = native_pixel_bytes;
    m_spec.auto_stride (xstride, ystride, zstride, format, nchans,
                        xend-xbegin, yend-ybegin);
    // Do the strides indicate that the data area is contiguous?
    bool contiguous = (native_data && xstride == native_pixel_bytes) ||
        (!native_data && xstride == (stride_t)m_spec.pixel_bytes(false));
    contiguous &= (ystride == xstride*(xend-xbegin) &&
                   (zstride == ystride*(yend-ybegin) || (zend-zbegin) <= 1));

    int nxtiles = (xend - xbegin + m_spec.tile_width - 1) / m_spec.tile_width;
    int nytiles = (yend - ybegin + m_spec.tile_height - 1) / m_spec.tile_height;
    int nztiles = (zend - zbegin + m_spec.tile_depth - 1) / m_spec.tile_depth;

    // If user's format and strides are set up to accept the native data
    // layout, and we're asking for a whole number of tiles (no partial
    // tiles at the edges), then read the tile directly into the user's
    // buffer.
    if (native_data && contiguous &&
        (xend-xbegin) == nxtiles*m_spec.tile_width &&
        (yend-ybegin) == nytiles*m_spec.tile_height &&
        (zend-zbegin) == nztiles*m_spec.tile_depth) {
        if (firstchan == 0 && nchans == m_spec.nchannels)
            return read_native_tiles (xbegin, xend, ybegin, yend, zbegin, zend,
                                      data);  // Simple case
        else
            return read_native_tiles (xbegin, xend, ybegin, yend, zbegin, zend,
                                      firstchan, nchans, data);
    }

    // No such luck.  Just punt and read tiles individually.
    bool ok = true;
    stride_t pixelsize = native_data ? native_pixel_bytes 
                                     : (format.size() * nchans);
    stride_t full_pixelsize = native_data ? m_spec.pixel_bytes(true)
                                          : (format.size() * m_spec.nchannels);
    size_t prefix_bytes = m_spec.pixel_bytes (0,firstchan,true);
    std::vector<char> buf;
    for (int z = zbegin;  z < zend;  z += std::max(1,m_spec.tile_depth)) {
        int zd = std::min (zend-z, m_spec.tile_depth);
        for (int y = ybegin;  y < yend;  y += m_spec.tile_height) {
            char *tilestart = ((char *)data + (z-zbegin)*zstride
                               + (y-ybegin)*ystride);
            int yh = std::min (yend-y, m_spec.tile_height);
            for (int x = xbegin;  ok && x < xend;  x += m_spec.tile_width) {
                int xw = std::min (xend-x, m_spec.tile_width);
                // Full tiles are read directly into the user buffer,
                // but partial tiles (such as at the image edge) or
                // partial channel subsets are read into a buffer and
                // then copied.
                if (xw == m_spec.tile_width && yh == m_spec.tile_height &&
                      zd == m_spec.tile_depth &&
                      firstchan == 0 && nchans == m_spec.nchannels) {
                    ok &= read_tile (x, y, z, format, tilestart,
                                     xstride, ystride, zstride);
                } else {
                    buf.resize (m_spec.tile_bytes());
                    ok &= read_tile (x, y, z, format, &buf[0],
                                     full_pixelsize,
                                     full_pixelsize*m_spec.tile_width,
                                     full_pixelsize*m_spec.tile_pixels());
                    if (ok)
                        copy_image (nchans, xw, yh, zd, &buf[prefix_bytes],
                                    pixelsize, full_pixelsize,
                                    full_pixelsize*m_spec.tile_width,
                                    full_pixelsize*m_spec.tile_pixels(),
                                    tilestart, xstride, ystride, zstride);
                }
                tilestart += m_spec.tile_width * xstride;
            }
        }
    }

    if (! ok)
        error ("ImageInput::read_tiles : no support for format %s",
               m_spec.format.c_str());
    return ok;
}
Code example #11
bool
ImageInput::read_scanlines (int ybegin, int yend, int z,
                            int firstchan, int nchans,
                            TypeDesc format, void *data,
                            stride_t xstride, stride_t ystride)
{
    nchans = std::min (nchans, m_spec.nchannels-firstchan);
    yend = std::min (yend, spec().y+spec().height);
    size_t native_pixel_bytes = m_spec.pixel_bytes (firstchan, nchans, true);
    imagesize_t native_scanline_bytes = clamped_mult64 ((imagesize_t)m_spec.width,
                                                        (imagesize_t)native_pixel_bytes);
    bool native = (format == TypeDesc::UNKNOWN);
    size_t pixel_bytes = native ? native_pixel_bytes : format.size()*nchans;
    if (native && xstride == AutoStride)
        xstride = pixel_bytes;
    stride_t zstride = AutoStride;
    m_spec.auto_stride (xstride, ystride, zstride, format, nchans,
                        m_spec.width, m_spec.height);
    bool contiguous = (xstride == (stride_t) native_pixel_bytes &&
                       ystride == (stride_t) native_scanline_bytes);
    // If user's format and strides are set up to accept the native data
    // layout, read the scanlines directly into the user's buffer.
    bool rightformat = (format == TypeDesc::UNKNOWN) ||
        (format == m_spec.format && m_spec.channelformats.empty());
    if (rightformat && contiguous) {
        if (firstchan == 0 && nchans == m_spec.nchannels)
            return read_native_scanlines (ybegin, yend, z, data);
        else
            return read_native_scanlines (ybegin, yend, z,
                                          firstchan, nchans, data);
    }

    // No such luck.  Read scanlines in chunks.

    const imagesize_t limit = 16*1024*1024;   // Allocate 16 MB, or 1 scanline
    int chunk = std::max (1, int(limit / native_scanline_bytes));
    std::vector<unsigned char> buf (chunk * native_scanline_bytes);

    bool ok = true;
    int scanline_values = m_spec.width * nchans;
    for (;  ok && ybegin < yend;  ybegin += chunk) {
        int y1 = std::min (ybegin+chunk, yend);
        ok &= read_native_scanlines (ybegin, y1, z, firstchan, nchans, &buf[0]);
        if (! ok)
            break;

        int nscanlines = y1 - ybegin;
        int chunkvalues = scanline_values * nscanlines;
        if (m_spec.channelformats.empty()) {
            // No per-channel formats -- do the conversion in one shot
            if (contiguous) {
                ok = convert_types (m_spec.format, &buf[0], format, data, chunkvalues);
            } else {
                ok = convert_image (nchans, m_spec.width, nscanlines, 1, 
                                    &buf[0], m_spec.format, AutoStride, AutoStride, AutoStride,
                                    data, format, xstride, ystride, zstride);
            }
        } else {
            // Per-channel formats -- have to convert/copy channels individually
            size_t offset = 0;
            for (int c = 0;  ok && c < nchans;  ++c) {
                TypeDesc chanformat = m_spec.channelformats[c+firstchan];
                ok = convert_image (1 /* channels */, m_spec.width, nscanlines, 1, 
                                    &buf[offset], chanformat, 
                                    pixel_bytes, AutoStride, AutoStride,
                                    (char *)data + c*m_spec.format.size(),
                                    format, xstride, ystride, zstride);
                offset += chanformat.size ();
            }
        }
        if (! ok)
            error ("ImageInput::read_scanlines : no support for format %s",
                   m_spec.format.c_str());
        data = (char *)data + ystride*nscanlines;
    }
    return ok;
}
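
The firstchan/nchans arguments let a caller read only a channel subset. A minimal call that pulls the first three channels of the whole image as floats could look roughly like this, assuming 'in' is an already-open ImageInput with at least three channels:

// Sketch: read scanlines ybegin..yend-1, channels 0..2 only, converted to float.
const ImageSpec &spec = in->spec ();
int ybegin = spec.y, yend = spec.y + spec.height;
std::vector<float> rgb ((size_t)spec.width * spec.height * 3);
if (! in->read_scanlines (ybegin, yend, 0 /*z*/, 0 /*firstchan*/, 3 /*nchans*/,
                          TypeDesc::FLOAT, &rgb[0]))
    std::cerr << "read_scanlines failed: " << in->geterror () << "\n";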
Code example #12
File: rlaoutput.cpp  Project: amanforindia/oiio
bool
RLAOutput::encode_channel (const unsigned char *data, stride_t xstride,
                           TypeDesc chantype, int bits)
{
    if (chantype == TypeDesc::FLOAT) {
        // Special case -- float data is just dumped raw, no RLE
        uint16_t size = m_spec.width * sizeof(float);
        write (&size);
        for (int x = 0;  x < m_spec.width;  ++x)
            write ((const float *)&data[x*xstride]);
        return true;
    }

    m_rle.resize (2);   // reserve 2 bytes for the encoded size

    // multi-byte data types are sliced to MSB, nextSB, ..., LSB
    int chsize = (int)chantype.size();
    for (int byte = 0;  byte < chsize;  ++byte) {
        int lastval = -1;     // last value
        int count = 0;        // count of raw or repeats
        bool repeat = false;  // if true, we're repeating
        int runbegin = 0;     // where did the run begin
        int byteoffset = bigendian() ? byte : (chsize-byte-1);
        for (int x = 0;  x < m_spec.width;  ++x) {
            int newval = data[x*xstride+byteoffset];
            if (count == 0) {   // beginning of a run.
                count = 1;
                repeat = true;  // presumptive
                runbegin = x;
            } else if (repeat) { // We've seen one or more repeating characters
                if (newval == lastval) {
                    // another repeating value
                    ++count;
                } else {
                    // We stopped repeating.
                    if (count < 3) {
                        // If we didn't even have 3 in a row, just 
                        // retroactively treat it as a raw run.
                        ++count;
                        repeat = false;
                    } else {
                        // We are ending a 3+ repetition
                        m_rle.push_back (count-1);
                        m_rle.push_back (lastval);
                        count = 1;
                        runbegin = x;
                    }
                }
            } else {  // Have not been repeating
                if (newval == lastval) {
                    // starting a repetition?  Output previous
                    ASSERT (count > 1);
                    // write everything but the last char
                    --count;
                    m_rle.push_back (-count);
                    for (int i = 0;  i < count;  ++i)
                        m_rle.push_back (data[(runbegin+i)*xstride+byteoffset]);
                    count = 2;
                    runbegin = x - 1;
                    repeat = true;
                } else {
                    ++count;  // another non-repeat
                }
            }

            // If the run is too long or we're at the scanline end, write
            if (count == 127 || x == m_spec.width-1) {
                if (repeat) {
                    m_rle.push_back (count-1); 
                    m_rle.push_back (lastval);
                } else {
                    m_rle.push_back (-count);
                    for (int i = 0;  i < count;  ++i)
                        m_rle.push_back (data[(runbegin+i)*xstride+byteoffset]);
                }
                count = 0;
            }
            lastval = newval;
        }
        ASSERT (count == 0);
    }

    // Now that we know the size of the encoded buffer, save it at the
    // beginning
    uint16_t size = uint16_t (m_rle.size() - 2);
    m_rle[0] = size >> 8;
    m_rle[1] = size & 255;

    // And write the channel to the file
    return write (&m_rle[0], m_rle.size());
}
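
The encoder above writes the RLA flavor of RLE: a signed count byte where count >= 0 means "repeat the next byte count+1 times" and a negative count means "-count literal bytes follow". A decoder consistent with that format is sketched below; it is only an illustration, not the decode_rle_span used by the reader in code example #4.

// Sketch: decode one RLA-style RLE span into 'npixels' output bytes, writing
// every 'stride' bytes so one byte-slice of a channel lands in the right place.
// Returns the number of encoded bytes consumed, or 0 on error.
#include <cstddef>

size_t decode_rla_rle (unsigned char *out, int npixels, int stride,
                       const signed char *encoded, size_t encoded_len)
{
    size_t e = 0;    // position within the encoded span
    int x = 0;       // output pixels produced so far
    while (x < npixels && e < encoded_len) {
        int count = encoded[e++];
        if (count >= 0) {
            // run: repeat the next byte (count+1) times
            if (e >= encoded_len)
                return 0;
            unsigned char val = (unsigned char) encoded[e++];
            for (int i = 0; i <= count && x < npixels; ++i, ++x)
                out[x*stride] = val;
        } else {
            // literal: copy -count raw bytes
            for (int i = 0; i < -count && x < npixels; ++i, ++x) {
                if (e >= encoded_len)
                    return 0;
                out[x*stride] = (unsigned char) encoded[e++];
            }
        }
    }
    return x == npixels ? e : 0;
}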
Code example #13
OSL_SHADEOP int
osl_getmessage (ShaderGlobals *sg, const char *source_, const char *name_,
                long long type_, void *val, int derivs,
                int layeridx, const char* sourcefile_, int sourceline)
{
    const ustring &source (USTR(source_));
    const ustring &name (USTR(name_));
    const ustring &sourcefile (USTR(sourcefile_));

    // recreate TypeDesc -- we just crammed it into an int!
    TypeDesc type (*(TypeDesc *)&type_);
    bool is_closure = (type.basetype == TypeDesc::UNKNOWN); // secret code for closure
    if (is_closure)
        type.basetype = TypeDesc::PTR;  // for closures, we store a pointer

    static ustring ktrace ("trace");
    if (source == ktrace) {
        // Source types where we need to ask the renderer
        RendererServices *renderer = sg->context->renderer();
        return renderer->getmessage (sg, source, name, type, val, derivs);
    }

    MessageList &messages (sg->context->messages());
    const Message* m = messages.find(name);
    if (m != NULL) {
        if (m->name == name) {
            if (m->type != type) {
                // found message, but types don't match
                sg->context->error(
                    "type mismatch for message \"%s\" (%s as %s here: %s:%d)"
                    " cannot fetch as %s from %s:%d",
                    name.c_str(),
                    m->has_data() ? "created" : "queried",
                    m->type == TypeDesc::PTR ? "closure color" : m->type.c_str(),
                    m->sourcefile.c_str(),
                    m->sourceline,
                    is_closure ? "closure color" : type.c_str(),
                    sourcefile.c_str(),
                    sourceline);
                return 0;
            }
            if (!m->has_data()) {
                // getmessage ran before and found nothing - just return 0
                return 0;
            }
            if (m->layeridx > layeridx) {
                // found message, but was set by a layer deeper than the one querying the message
                sg->context->error(
                    "message \"%s\" was set by layer #%d (%s:%d)"
                    " but is being queried by layer #%d (%s:%d)"
                    " - messages may only be transfered from nodes "
                    "that appear earlier in the shading network",
                    name.c_str(),
                    m->layeridx,
                    m->sourcefile.c_str(),
                    m->sourceline,
                    layeridx,
                    sourcefile.c_str(),
                    sourceline);
                return 0;
            }
            // Message found!
            size_t size = type.size();
            memcpy (val, m->data, size);
            if (derivs) // TODO: move this to llvm code gen?
                memset (((char *)val)+size, 0, 2*size);
            return 1;
        }
    }
    // Message not found -- we must record this event in case another layer tries to set the message again later on
    if (sg->context->shadingsys().strict_messages())
        messages.add(name, NULL, type, layeridx, sourcefile, sourceline);
    return 0;
}
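The `type_` argument arrives as a 64-bit integer because a TypeDesc is small enough to pass through generated code by value; osl_getmessage() simply reinterprets the bits. A minimal round-trip sketch of that packing, using memcpy instead of a pointer cast (it assumes only that sizeof(TypeDesc) <= sizeof(long long)):

#include <OpenImageIO/typedesc.h>
#include <cstring>

// Pack a TypeDesc into a long long and recover it, mirroring how
// osl_getmessage() reconstructs the type it was handed.
static long long pack_typedesc (const OIIO::TypeDesc &t)
{
    static_assert (sizeof(OIIO::TypeDesc) <= sizeof(long long),
                   "TypeDesc must fit in a long long");
    long long bits = 0;
    std::memcpy (&bits, &t, sizeof(t));
    return bits;
}

static OIIO::TypeDesc unpack_typedesc (long long bits)
{
    OIIO::TypeDesc t;
    std::memcpy (&t, &bits, sizeof(t));
    return t;
}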
Code example #14
File: imageoutput.cpp Project: Nvizible/oiio
bool
ImageOutput::write_image (TypeDesc format, const void *data,
                          stride_t xstride, stride_t ystride, stride_t zstride,
                          ProgressCallback progress_callback,
                          void *progress_callback_data)
{
    bool native = (format == TypeDesc::UNKNOWN);
    stride_t pixel_bytes = native ? (stride_t) m_spec.pixel_bytes (native)
                                  : format.size() * m_spec.nchannels;
    if (xstride == AutoStride)
        xstride = pixel_bytes;
    m_spec.auto_stride (xstride, ystride, zstride, format,
                        m_spec.nchannels, m_spec.width, m_spec.height);

    if (supports ("rectangles")) {
        // Use a rectangle if we can
        return write_rectangle (0, m_spec.width, 0, m_spec.height, 0, m_spec.depth,
                                format, data, xstride, ystride, zstride);
    }

    bool ok = true;
    if (progress_callback && progress_callback (progress_callback_data, 0.0f))
        return ok;
    if (m_spec.tile_width && supports ("tiles")) {
        // Tiled image
        for (int z = 0;  z < m_spec.depth;  z += m_spec.tile_depth) {
            int zend = std::min (z+m_spec.z+m_spec.tile_depth,
                                 m_spec.z+m_spec.depth);
            for (int y = 0;  y < m_spec.height;  y += m_spec.tile_height) {
                int yend = std::min (y+m_spec.y+m_spec.tile_height,
                                     m_spec.y+m_spec.height);
                const char *d = (const char *)data + z*zstride + y*ystride;
                ok &= write_tiles (m_spec.x, m_spec.x+m_spec.width,
                                   y+m_spec.y, yend, z+m_spec.z, zend,
                                   format, d, xstride, ystride, zstride);
                if (progress_callback &&
                    progress_callback (progress_callback_data,
                                       (float)(z*m_spec.height+y)/(m_spec.height*m_spec.depth)))
                    return ok;
            }
        }
    } else {
        // Scanline image
        const int chunk = 256;
        for (int z = 0;  z < m_spec.depth;  ++z)
            for (int y = 0;  y < m_spec.height && ok;  y += chunk) {
                int yend = std::min (y+m_spec.y+chunk, m_spec.y+m_spec.height);
                const char *d = (const char *)data + z*zstride + y*ystride;
                ok &= write_scanlines (y+m_spec.y, yend, z+m_spec.z,
                                       format, d, xstride, ystride);
                if (progress_callback &&
                    progress_callback (progress_callback_data,
                                       (float)(z*m_spec.height+y)/(m_spec.height*m_spec.depth)))
                    return ok;
            }
    }
    if (progress_callback)
        progress_callback (progress_callback_data, 1.0f);

    return ok;
}
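From the caller's side, write_image() is normally the last step of the usual create/open/write sequence. A minimal sketch against the classic (raw-pointer) ImageOutput API; the filename, channel count, and pixel buffer are hypothetical:

#include <OpenImageIO/imageio.h>
using namespace OIIO;

bool save_rgb_float (const char *filename, int w, int h, const float *pixels)
{
    ImageOutput *out = ImageOutput::create (filename);
    if (! out)
        return false;
    ImageSpec spec (w, h, 3, TypeDesc::FLOAT);
    bool ok = out->open (filename, spec) &&
              out->write_image (TypeDesc::FLOAT, pixels);  // strides default to AutoStride
    ok &= out->close ();
    delete out;   // classic API: create() returns a raw pointer
    return ok;
}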
Code example #15
void
ShaderInstance::parameters (const ParamValueList &params)
{
    // Seed the params with the master's defaults
    m_iparams = m_master->m_idefaults;
    m_fparams = m_master->m_fdefaults;
    m_sparams = m_master->m_sdefaults;

    m_instoverrides.resize (std::max (0, lastparam()));

    // Set the initial lockgeom and dataoffset on the instoverrides, based
    // on the master.
    for (int i = 0, e = (int)m_instoverrides.size(); i < e; ++i) {
        Symbol *sym = master()->symbol(i);
        m_instoverrides[i].lockgeom (sym->lockgeom());
        m_instoverrides[i].dataoffset (sym->dataoffset());
    }

    for (auto&& p : params) {
        if (p.name().size() == 0)
            continue;   // skip empty names
        int i = findparam (p.name());
        if (i >= 0) {
            // if (shadingsys().debug())
            //     shadingsys().info (" PARAMETER %s %s", p.name(), p.type());
            const Symbol *sm = master()->symbol(i);    // This sym in the master
            SymOverrideInfo *so = &m_instoverrides[i]; // Slot for sym's override info
            TypeSpec sm_typespec = sm->typespec(); // Type of the master's param
            if (sm_typespec.is_closure_based()) {
                // Can't assign a closure instance value.
                shadingsys().warning ("skipping assignment of closure: %s", sm->name());
                continue;
            }
            if (sm_typespec.is_structure())
                continue;    // structs are just placeholders; skip

            const void *data = p.data();
            float tmpdata[3]; // used for inline conversions to float/float[3]

            // Check the type of the parameter against the matching symbol. Note
            // that compatible_param() accounts for indefinite-length arrays.
            TypeDesc paramtype = sm_typespec.simpletype();  // what the shader writer wants
            TypeDesc valuetype = p.type();                  // what the data provided actually is

            if (master()->shadingsys().relaxed_param_typecheck()) {
                // first handle cases where we actually need to modify the data (like setting a float parameter with an int)
                 if ((paramtype == TypeDesc::FLOAT || paramtype.is_vec3()) && valuetype.basetype == TypeDesc::INT && valuetype.basevalues() == 1) {
                    int val = *static_cast<const int*>(p.data());
                    float conv = float(val);
                    if (val != int(conv))
                        shadingsys().error ("attempting to set parameter from wrong type would change the value: %s (set %.9g from %d)",
                            sm->name(), conv, val);
                    tmpdata[0] = conv;
                    data = tmpdata;
                    valuetype = TypeDesc::FLOAT;
                }

                // Relaxed rules just look to see that the types are isomorphic to each other (ie: same number of base values)
                // Note that:
                //   * basetypes must match exactly (int vs float vs string)
                //   * valuetype cannot be unsized (we must know the concrete number of values)
                //   * if paramtype is sized (or not an array) just check for the total number of entries
                //   * if paramtype is unsized (shader writer is flexible about how many values come in) -- make sure we are a multiple of the target type
                //   * allow a single float setting a vec3 (or equivalent)
                if (!( valuetype.basetype == paramtype.basetype &&
                      !valuetype.is_unsized_array() &&
                      ((!paramtype.is_unsized_array() && valuetype.basevalues() == paramtype.basevalues()) ||
                       ( paramtype.is_unsized_array() && valuetype.basevalues() % paramtype.aggregate == 0) ||
                       ( paramtype.is_vec3()          && valuetype == TypeDesc::FLOAT) ) )) {
                    // We are being very relaxed in this mode, so if the user _still_ got it wrong
                    // something more serious is at play and we should treat it as an error.
                    shadingsys().error ("attempting to set parameter from incompatible type: %s (expected '%s', received '%s')",
                                          sm->name(), paramtype, valuetype);
                    continue;
                }
            } else if (!compatible_param(paramtype, valuetype)) {
                shadingsys().warning ("attempting to set parameter with wrong type: %s (expected '%s', received '%s')",
                                      sm->name(), paramtype, valuetype);
                continue;
            }

            // Mark the override as an instance value
            so->valuesource (Symbol::InstanceVal);

            // Lock the param against geometric primitive overrides if the
            // master thinks it was so locked, AND the Parameter() call
            // didn't specify lockgeom=false (which would be indicated by
            // the parameter's interpolation being non-CONSTANT).
            bool lockgeom = (sm->lockgeom() &&
                             p.interp() == ParamValue::INTERP_CONSTANT);
            so->lockgeom (lockgeom);

            DASSERT (so->dataoffset() == sm->dataoffset());
            so->dataoffset (sm->dataoffset());

            if (paramtype.is_vec3() && valuetype == TypeDesc::FLOAT) {
                // Handle the special case of assigning a float for a triple
                // by replicating it into local memory.
                tmpdata[0] = *(const float *)data;
                tmpdata[1] = *(const float *)data;
                tmpdata[2] = *(const float *)data;
                data = &tmpdata;
                valuetype = paramtype;
            }

            if (paramtype.arraylen < 0) {
                // An array of definite size was supplied to a parameter
                // that was an array of indefinite size. Magic! The trick
                // here is that we need to allocate parameter space at the
                // END of the ordinary param storage, since when we assigned
                // data offsets to each parameter, we didn't know the length
                // needed to allocate this param in its proper spot.
                int nelements = valuetype.basevalues();
                // Store the actual length in the shader instance parameter
                // override info. Compute the length this way to account for relaxed
                // parameter checking (for example passing an array of floats to an array of colors)
                so->arraylen (nelements / paramtype.aggregate);
                // Allocate space for the new param size at the end of its
                // usual parameter area, and set the new dataoffset to that
                // position.
                if (paramtype.basetype == TypeDesc::FLOAT) {
                    so->dataoffset((int) m_fparams.size());
                    expand (m_fparams, nelements);
                } else if (paramtype.basetype == TypeDesc::INT) {
                    so->dataoffset((int) m_iparams.size());
                    expand (m_iparams, nelements);
                } else if (paramtype.basetype == TypeDesc::STRING) {
                    so->dataoffset((int) m_sparams.size());
                    expand (m_sparams, nelements);
                } else {
                    ASSERT (0 && "unexpected type");
                }
                // FIXME: There's a tricky case that we overlook here, where
                // an indefinite-length-array parameter is given DIFFERENT
                // definite length in subsequent rerenders. Don't do that.
            }
            else {
                // If the instance value is the same as the master's default,
                // just skip the parameter, let it "keep" the default.
                // Note that this can't/shouldn't happen for the indefinite-
                // sized array case, which is why we have it in the 'else'
                // clause of that test.
                void *defaultdata = m_master->param_default_storage(i);
                if (lockgeom &&
                      memcmp (defaultdata, data, valuetype.size()) == 0) {
                    // Must reset valuesource to default, in case the parameter
                    // was set already, and now is being changed back to default.
                    so->valuesource (Symbol::DefaultVal);
                }
            }

            // Copy the supplied data into place.
            memcpy (param_storage(i), data, valuetype.size());
        }
        else {
            shadingsys().warning ("attempting to set nonexistent parameter: %s", p.name());
        }
    }

    {
        // Adjust the stats
        ShadingSystemImpl &ss (shadingsys());
        size_t symmem = vectorbytes(m_instoverrides);
        size_t parammem = (vectorbytes(m_iparams) + vectorbytes(m_fparams) +
                           vectorbytes(m_sparams));
        spin_lock lock (ss.m_stat_mutex);
        ss.m_stat_mem_inst_syms += symmem;
        ss.m_stat_mem_inst_paramvals += parammem;
        ss.m_stat_mem_inst += (symmem+parammem);
        ss.m_stat_memory += (symmem+parammem);
    }
}
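These instance values originate from the public ShadingSystem interface: each Parameter() call queues a ParamValue, and the following Shader() call hands the accumulated list to ShaderInstance::parameters(). A minimal caller-side sketch using the classic, non-group-argument calls; the group, shader, and parameter names are hypothetical:

#include <OSL/oslexec.h>
using namespace OSL;

// Queue an instance value for a hypothetical "matte" layer; the ParamValue
// list built up by Parameter() is what ShaderInstance::parameters() receives.
ShaderGroupRef declare_matte_layer (ShadingSystem *ss)
{
    ShaderGroupRef group = ss->ShaderGroupBegin ("matte_group");
    float kd = 0.75f;
    ss->Parameter ("Kd", TypeDesc::FLOAT, &kd);
    ss->Shader ("surface", "matte", "matte_layer");
    ss->ShaderGroupEnd ();
    return group;
}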
Code example #16
File: imagebufalgo_opencv.cpp Project: AheadIO/oiio
IplImage *
ImageBufAlgo::to_IplImage (const ImageBuf &src)
{
#ifdef USE_OPENCV
    ImageBuf tmp = src;
    ImageSpec spec = tmp.spec();

    // Make sure the image buffer is initialized.
    if (!tmp.initialized() && !tmp.read(tmp.subimage(), tmp.miplevel(), true)) {
        DASSERT (0 && "Could not initialize ImageBuf.");
        return NULL;
    }

    int dstFormat;
    TypeDesc dstSpecFormat;
    if (spec.format == TypeDesc(TypeDesc::UINT8)) {
        dstFormat = IPL_DEPTH_8U;
        dstSpecFormat = spec.format;
    } else if (spec.format == TypeDesc(TypeDesc::INT8)) {
        dstFormat = IPL_DEPTH_8S;
        dstSpecFormat = spec.format;
    } else if (spec.format == TypeDesc(TypeDesc::UINT16)) {
        dstFormat = IPL_DEPTH_16U;
        dstSpecFormat = spec.format;
    } else if (spec.format == TypeDesc(TypeDesc::INT16)) {
        dstFormat = IPL_DEPTH_16S;
        dstSpecFormat = spec.format;
    } else if (spec.format == TypeDesc(TypeDesc::HALF)) {
        dstFormat = IPL_DEPTH_32F;
        // OpenCV does not support half types. Switch to float instead.
        dstSpecFormat = TypeDesc(TypeDesc::FLOAT);
    } else if (spec.format == TypeDesc(TypeDesc::FLOAT)) {
        dstFormat = IPL_DEPTH_32F;
        dstSpecFormat = spec.format;
    } else if (spec.format == TypeDesc(TypeDesc::DOUBLE)) {
        dstFormat = IPL_DEPTH_64F;
        dstSpecFormat = spec.format;
    } else {
        DASSERT (0 && "Unknown data format in ImageBuf.");
        return NULL;
    }
    IplImage *ipl = cvCreateImage(cvSize(spec.width, spec.height), dstFormat, spec.nchannels);
    if (!ipl) {
        DASSERT (0 && "Unable to create IplImage.");
        return NULL;
    }

    size_t pixelsize = dstSpecFormat.size() * spec.nchannels;
    // Account for the origin in the line step size, to end up with the
    // standard OIIO origin-at-upper-left:
    size_t linestep = ipl->origin ? -ipl->widthStep : ipl->widthStep;

    bool converted = convert_image(spec.nchannels, spec.width, spec.height, 1,
                                   tmp.localpixels(), spec.format,
                                   spec.pixel_bytes(), spec.scanline_bytes(), 0,
                                   ipl->imageData, dstSpecFormat,
                                   pixelsize, linestep, 0);

    if (!converted) {
        DASSERT (0 && "convert_image failed.");
        cvReleaseImage(&ipl);
        return NULL;
    }

    // OpenCV uses BGR ordering
    if (spec.nchannels == 3) {
        cvCvtColor(ipl, ipl, CV_RGB2BGR);
    } else if (spec.nchannels == 4) {
        cvCvtColor(ipl, ipl, CV_RGBA2BGRA);
    }

    return ipl;
#else
    return NULL;
#endif
}
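A caller-side sketch, assuming an OpenCV-enabled build and that imagebufalgo.h supplies the IplImage forward declaration used by this entry point; the filename is hypothetical, and the returned image is owned by the caller (release it with cvReleaseImage):

#include <OpenImageIO/imagebuf.h>
#include <OpenImageIO/imagebufalgo.h>
using namespace OIIO;

// Convert a file on disk into an OpenCV IplImage. Returns NULL if the build
// lacks OpenCV or the conversion fails; the caller owns the result.
IplImage *load_as_ipl (const char *filename)
{
    ImageBuf src (filename);                 // reads lazily on first use
    return ImageBufAlgo::to_IplImage (src);
}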
Code example #17
File: rawinput.cpp Project: StereoD-Development/oiio
static void
exif_parser_cb (ImageSpec* spec, int tag, int tifftype, int len,
                unsigned int byteorder, LibRaw_abstract_datastream* ifp)
{
    // Oy, the data offsets are all going to be relative to the start of the
    // stream, not relative to our current position and data block. So we
    // need to remember that offset and pass its negative as the
    // offset_adjustment to the handler.
    size_t streampos = ifp->tell();
    // std::cerr << "Stream position " << streampos << "\n";

    TypeDesc type = tiff_datatype_to_typedesc (TIFFDataType(tifftype), size_t(len));
    const TagInfo* taginfo = tag_lookup ("Exif", tag);
    if (! taginfo) {
        // Strutil::fprintf (std::cerr, "NO TAGINFO FOR CALLBACK tag=%d (0x%x): tifftype=%d,len=%d (%s), byteorder=0x%x\n",
        //                   tag, tag, tifftype, len, type, byteorder);
        return;
    }
    if (type.size() >= (1<<20))
        return;   // sanity check -- too much memory
    size_t size = tiff_data_size(TIFFDataType(tifftype)) * len;
    std::vector<unsigned char> buf (size);
    ifp->read (buf.data(), size, 1);

    // debug scaffolding
    // Strutil::fprintf (std::cerr, "CALLBACK tag=%s: tifftype=%d,len=%d (%s), byteorder=0x%x\n",
    //                   taginfo->name, tifftype, len, type, byteorder);
    // for (int i = 0; i < std::min(16UL,size); ++i) {
    //     if (buf[i] >= ' ' && buf[i] < 128)
    //         std::cerr << char(buf[i]);
    //     Strutil::fprintf (std::cerr, "(%d) ", int(buf[i]));
    // }
    // std::cerr << "\n";

    bool swab = (littleendian() != (byteorder == 0x4949));
    if (swab) {
        if (type.basetype == TypeDesc::UINT16)
            swap_endian ((uint16_t *)buf.data(), len);
        if (type.basetype == TypeDesc::UINT32)
            swap_endian ((uint32_t *)buf.data(), len);
    }

    if (taginfo->handler) {
        TIFFDirEntry dir;
        dir.tdir_tag = uint16_t(tag);
        dir.tdir_type = uint16_t(tifftype);
        dir.tdir_count = uint32_t(len);
        dir.tdir_offset = 0;
        taginfo->handler (*taginfo, dir, buf, *spec, swab, -int(streampos));
        // std::cerr << "HANDLED " << taginfo->name << "\n";
        return;
    }
    if (taginfo->tifftype == TIFF_NOTYPE)
        return;   // skip
    if (tifftype == TIFF_RATIONAL || tifftype == TIFF_SRATIONAL) {
        spec->attribute (taginfo->name, type, buf.data());
        return;
    }
    if (type.basetype == TypeDesc::UINT16) {
        spec->attribute (taginfo->name, type, buf.data());
        return;
    }
    if (type.basetype == TypeDesc::UINT32) {
        spec->attribute (taginfo->name, type, buf.data());
        return;
    }
    if (type == TypeString) {
        spec->attribute (taginfo->name, string_view((char*)buf.data(), size));
        return;
    }
    // Strutil::fprintf (std::cerr, "RAW metadata NOT HANDLED: tag=%s: tifftype=%d,len=%d (%s), byteorder=0x%x\n",
    //                   taginfo->name, tifftype, len, type, byteorder);
}
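This callback only does something once it is hooked into LibRaw's EXIF parser. A hedged sketch of that registration, assuming LibRaw's set_exifparser_handler(exif_parser_callback, void*) hook and a reader that keeps a LibRaw processor and an ImageSpec as members:

#include <libraw/libraw.h>
#include <OpenImageIO/imageio.h>

// Hypothetical setup inside a raw reader's open(): hand LibRaw the callback
// defined above, passing the ImageSpec as the context pointer it gives back.
static void register_exif_callback (LibRaw &processor, OIIO::ImageSpec &spec)
{
    processor.set_exifparser_handler ((exif_parser_callback) exif_parser_cb,
                                      &spec);
}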
Code example #18
File: exrinput.cpp Project: jamesvecore/oiio
void
OpenEXRInput::PartInfo::query_channels (const Imf::Header *header)
{
    ASSERT (! initialized);
    spec.nchannels = 0;
    const Imf::ChannelList &channels (header->channels());
    std::vector<std::string> channelnames;  // Order of channels in file
    std::vector<int> userchannels;      // Map file chans to user chans
    Imf::ChannelList::ConstIterator ci;
    int c;
    int red = -1, green = -1, blue = -1, alpha = -1, zee = -1;
    for (c = 0, ci = channels.begin();  ci != channels.end();  ++c, ++ci) {
        const char* name = ci.name();
        channelnames.push_back (name);
        if (red < 0 && (Strutil::iequals(name, "R") || Strutil::iequals(name, "Red") ||
                        Strutil::iends_with(name,".R") || Strutil::iends_with(name,".Red") ||
                        Strutil::iequals(name, "real")))
            red = c;
        if (green < 0 && (Strutil::iequals(name, "G") || Strutil::iequals(name, "Green") ||
                          Strutil::iends_with(name,".G") || Strutil::iends_with(name,".Green") ||
                          Strutil::iequals(name, "imag")))
            green = c;
        if (blue < 0 && (Strutil::iequals(name, "B") || Strutil::iequals(name, "Blue") ||
                         Strutil::iends_with(name,".B") || Strutil::iends_with(name,".Blue")))
            blue = c;
        if (alpha < 0 && (Strutil::iequals(name, "A") || Strutil::iequals(name, "Alpha") ||
                          Strutil::iends_with(name,".A") || Strutil::iends_with(name,".Alpha")))
            alpha = c;
        if (zee < 0 && (Strutil::iequals(name, "Z") || Strutil::iequals(name, "Depth") ||
                        Strutil::iends_with(name,".Z") || Strutil::iends_with(name,".Depth")))
            zee = c;
    }
    spec.nchannels = (int)channelnames.size();
    userchannels.resize (spec.nchannels);
    int nc = 0;
    if (red >= 0) {
        spec.channelnames.push_back (channelnames[red]);
        userchannels[red] = nc++;
    }
    if (green >= 0) {
        spec.channelnames.push_back (channelnames[green]);
        userchannels[green] = nc++;
    }
    if (blue >= 0) {
        spec.channelnames.push_back (channelnames[blue]);
        userchannels[blue] = nc++;
    }
    if (alpha >= 0) {
        spec.channelnames.push_back (channelnames[alpha]);
        spec.alpha_channel = nc;
        userchannels[alpha] = nc++;
    }
    if (zee >= 0) {
        spec.channelnames.push_back (channelnames[zee]);
        spec.z_channel = nc;
        userchannels[zee] = nc++;
    }
    for (c = 0, ci = channels.begin();  ci != channels.end();  ++c, ++ci) {
        if (red == c || green == c || blue == c || alpha == c || zee == c)
            continue;   // Already accounted for this channel
        userchannels[c] = nc;
        spec.channelnames.push_back (ci.name());
        ++nc;
    }
    ASSERT ((int)spec.channelnames.size() == spec.nchannels);
    // FIXME: should we also figure out the layers?

    // Figure out data types -- choose the highest range
    spec.format = TypeDesc::UNKNOWN;
    std::vector<TypeDesc> chanformat;
    for (c = 0, ci = channels.begin();  ci != channels.end();  ++c, ++ci) {
        Imf::PixelType ptype = ci.channel().type;
        TypeDesc fmt = TypeDesc::HALF;
        switch (ptype) {
        case Imf::UINT :
            fmt = TypeDesc::UINT;
            if (spec.format == TypeDesc::UNKNOWN)
                spec.format = TypeDesc::UINT;
            break;
        case Imf::HALF :
            fmt = TypeDesc::HALF;
            if (spec.format != TypeDesc::FLOAT)
                spec.format = TypeDesc::HALF;
            break;
        case Imf::FLOAT :
            fmt = TypeDesc::FLOAT;
            spec.format = TypeDesc::FLOAT;
            break;
        default: ASSERT (0);
        }
        pixeltype.push_back (ptype);
        chanbytes.push_back (fmt.size());
        if (chanformat.size() == 0)
            chanformat.resize (spec.nchannels, fmt);
        for (int i = 0;  i < spec.nchannels;  ++i) {
            ASSERT ((int)spec.channelnames.size() > i);
            if (spec.channelnames[i] == ci.name()) {
                chanformat[i] = fmt;
                break;
            }
        }
    }
    ASSERT (spec.format != TypeDesc::UNKNOWN);
    bool differing_chanformats = false;
    for (int c = 1;  c < spec.nchannels;  ++c)
        differing_chanformats |= (chanformat[c] != chanformat[0]);
    if (differing_chanformats)
        spec.channelformats = chanformat;
}
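The spec.format bookkeeping above boils down to a precedence rule: FLOAT wins over HALF, which wins over UINT. A standalone sketch of that promotion, equivalent to what the channel loop computes (the helper name is hypothetical):

#include <OpenImageIO/typedesc.h>
#include <OpenEXR/ImfPixelType.h>
#include <vector>

// Pick the single "highest range" format for a set of EXR channel types,
// matching the promotion performed channel-by-channel in query_channels():
// any FLOAT forces FLOAT, otherwise any HALF forces HALF, otherwise UINT.
static OIIO::TypeDesc promote_format (const std::vector<Imf::PixelType> &ptypes)
{
    OIIO::TypeDesc fmt = OIIO::TypeDesc::UNKNOWN;
    for (Imf::PixelType p : ptypes) {
        if (p == Imf::FLOAT)
            fmt = OIIO::TypeDesc::FLOAT;
        else if (p == Imf::HALF && fmt != OIIO::TypeDesc::FLOAT)
            fmt = OIIO::TypeDesc::HALF;
        else if (p == Imf::UINT && fmt == OIIO::TypeDesc::UNKNOWN)
            fmt = OIIO::TypeDesc::UINT;
    }
    return fmt;
}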
Code example #19
File: imageoutput.cpp Project: angeljimenez/oiio
const void *
ImageOutput::to_native_rectangle (int xmin, int xmax, int ymin, int ymax,
                                  int zmin, int zmax, 
                                  TypeDesc format, const void *data,
                                  stride_t xstride, stride_t ystride, stride_t zstride,
                                  std::vector<unsigned char> &scratch)
{
    stride_t native_pixel_bytes = (stride_t) m_spec.pixel_bytes (true);
    if (format == TypeDesc::UNKNOWN && xstride == AutoStride)
        xstride = native_pixel_bytes;
    m_spec.auto_stride (xstride, ystride, zstride, format,
                        m_spec.nchannels, xmax-xmin+1, ymax-ymin+1);

    // Compute width and height from the rectangle extents
    int width = xmax - xmin + 1;
    int height = ymax - ymin + 1;
    int depth = zmax - zmin + 1;

    // Do the strides indicate that the data are already contiguous?
    bool contiguous = (xstride == native_pixel_bytes &&
                       (ystride == xstride*width || height == 1) &&
                       (zstride == ystride*height || depth == 1));
    // Does the user already have the data in the right format?
    bool rightformat = (format == TypeDesc::UNKNOWN) ||
        (format == m_spec.format && m_spec.channelformats.empty());
    if (rightformat && contiguous) {
        // Data are already in the native format and contiguous
        // just return a ptr to the original data.
        return data;
    }

    imagesize_t rectangle_pixels = width * height * depth;
    imagesize_t rectangle_values = rectangle_pixels * m_spec.nchannels;
    imagesize_t rectangle_bytes = rectangle_pixels * native_pixel_bytes;

    // Handle the per-channel format case
    if (m_spec.channelformats.size() && supports("channelformats")) {
        ASSERT (contiguous && "Per-channel output requires contiguous strides");
        ASSERT (format != TypeDesc::UNKNOWN);
        scratch.resize (rectangle_bytes);
        size_t offset = 0;
        for (int c = 0;  c < (int)m_spec.channelformats.size();  ++c) {
            TypeDesc chanformat = m_spec.channelformats[c];
            convert_image (1 /* channels */, width, height, depth,
                           (char *)data + c*m_spec.format.size(), format,
                           xstride, ystride, zstride, 
                           &scratch[offset], chanformat,
                           native_pixel_bytes, AutoStride, AutoStride, NULL,
                           c == m_spec.alpha_channel ? 0 : -1,
                           c == m_spec.z_channel ? 0 : -1);
            offset += chanformat.size ();
        }
        return &scratch[0];
    }

    imagesize_t contiguoussize = contiguous ? 0 : rectangle_values * native_pixel_bytes;
    contiguoussize = (contiguoussize+3) & (~3); // Round up to 4-byte boundary
    DASSERT ((contiguoussize & 3) == 0);
    imagesize_t floatsize = rectangle_values * sizeof(float);
    scratch.resize (contiguoussize + floatsize + rectangle_bytes);

    // Force contiguity if not already present
    if (! contiguous) {
        data = contiguize (data, m_spec.nchannels, xstride, ystride, zstride,
                           (void *)&scratch[0], width, height, depth, format);
    }

    // Rather than implement the entire cross-product of possible
    // conversions, use float as an intermediate format, which generally
    // will always preserve enough precision.
    const float *buf;
    if (format == TypeDesc::FLOAT) {
        // Already in float format -- leave it as-is.
        buf = (float *)data;
    } else {
        // Convert from 'format' to float.
        buf = convert_to_float (data, (float *)&scratch[contiguoussize],
                                rectangle_values, format);
    }
    
    // Convert from float to native format.
    return convert_from_float (buf, &scratch[contiguoussize+floatsize], 
                       rectangle_values, m_spec.quant_black, m_spec.quant_white,
                       m_spec.quant_min, m_spec.quant_max,
                       m_spec.format);
}
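Format writers rarely call to_native_rectangle() directly; the to_native_scanline/to_native_tile convenience wrappers funnel into it. A hedged sketch of the typical plugin-side pattern, using a hypothetical MyOutput writer (the actual file I/O is left as a comment):

#include <OpenImageIO/imageio.h>
#include <vector>
using namespace OIIO;

// Minimal hypothetical writer illustrating the usual pattern: let
// to_native_scanline() (which funnels into to_native_rectangle() above)
// repack the caller's data, then emit the native bytes.
class MyOutput : public ImageOutput {
public:
    virtual const char *format_name () const { return "my"; }
    virtual bool open (const std::string &name, const ImageSpec &spec,
                       OpenMode mode = Create) {
        m_spec = spec;          // normally: also open the file here
        return true;
    }
    virtual bool close () { return true; }
    virtual bool write_scanline (int y, int z, TypeDesc format,
                                 const void *data, stride_t xstride) {
        std::vector<unsigned char> scratch;
        data = to_native_scanline (format, data, xstride, scratch);
        // ... write m_spec.scanline_bytes(true) bytes of `data` to the file ...
        return data != NULL;
    }
};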