VM_Data_Type type_info_get_type(Type_Info_Handle tih)
{
    assert(tih);
    TypeDesc* td = (TypeDesc*)tih;
    switch (td->get_kind()) {
    case K_S1:               return VM_DATA_TYPE_INT8;
    case K_S2:               return VM_DATA_TYPE_INT16;
    case K_S4:               return VM_DATA_TYPE_INT32;
    case K_S8:               return VM_DATA_TYPE_INT64;
    case K_Sp:               return VM_DATA_TYPE_INTPTR;
    case K_U1:               return VM_DATA_TYPE_UINT8;
    case K_U2:               return VM_DATA_TYPE_UINT16;
    case K_U4:               return VM_DATA_TYPE_UINT32;
    case K_U8:               return VM_DATA_TYPE_UINT64;
    case K_Up:               return VM_DATA_TYPE_UINTPTR;
    case K_F4:               return VM_DATA_TYPE_F4;
    case K_F8:               return VM_DATA_TYPE_F8;
    case K_Char:             return VM_DATA_TYPE_CHAR;
    case K_Boolean:          return VM_DATA_TYPE_BOOLEAN;
    case K_Void:             return VM_DATA_TYPE_VOID;
    case K_Object:           return VM_DATA_TYPE_CLASS;
    case K_Vector:           return VM_DATA_TYPE_ARRAY;
    case K_UnboxedValue:     return VM_DATA_TYPE_VALUE;
    case K_UnmanagedPointer: return VM_DATA_TYPE_UP;
    case K_ManagedPointer:   return VM_DATA_TYPE_MP;
    // The rest are not implemented in the VM_Data_Type scheme
    case K_Array:
    case K_MethodPointer:
    case K_TypedRef:
    default:
        DIE(("Invalid vm data type"));
        return VM_DATA_TYPE_INVALID;
    }
} //type_info_get_type
BOOLEAN type_info_is_managed_pointer(Type_Info_Handle tih)
{
    assert(tih);
    TypeDesc* td = (TypeDesc*)tih;
    assert(td);
    return td->is_managed_pointer();
} //type_info_is_managed_pointer
bool
RLAOutput::write_scanline (int y, int z, TypeDesc format,
                           const void *data, stride_t xstride)
{
    m_spec.auto_stride (xstride, format, spec().nchannels);
    const void *origdata = data;
    data = to_native_scanline (format, data, xstride, m_scratch);
    if (data == origdata) {
        m_scratch.assign ((unsigned char *)data,
                          (unsigned char *)data + m_spec.scanline_bytes());
        data = &m_scratch[0];
    }

    // Store the offset to the scanline.  We'll swap_endian if necessary
    // when we go to actually write it.
    m_sot[m_spec.height - y - 1] = (uint32_t) ftell (m_file);

    size_t pixelsize = m_spec.pixel_bytes (true /*native*/);
    int offset = 0;
    for (int c = 0;  c < m_spec.nchannels;  ++c) {
        TypeDesc chantype = m_spec.channelformats.size()
                                ? m_spec.channelformats[c] : m_spec.format;
        // Color channels, matte channels, and auxiliary channels each carry
        // their own bit depth in the RLA header.
        int bits = (c < m_rla.NumOfColorChannels) ? m_rla.NumOfChannelBits
            : (c < (m_rla.NumOfColorChannels + m_rla.NumOfMatteChannels))
                ? m_rla.NumOfMatteBits : m_rla.NumOfAuxBits;
        if (! encode_channel ((unsigned char *)data + offset, pixelsize,
                              chantype, bits))
            return false;
        offset += chantype.size();
    }
    return true;
}
bool
oiio_attribute_tuple_typed (const std::string &name, TypeDesc type, tuple &obj)
{
    if (type.basetype == TypeDesc::INT) {
        std::vector<int> vals;
        py_to_stdvector (vals, obj);
        if (vals.size() == type.numelements()*type.aggregate)
            return OIIO::attribute (name, type, &vals[0]);
        return false;
    }
    if (type.basetype == TypeDesc::FLOAT) {
        std::vector<float> vals;
        py_to_stdvector (vals, obj);
        if (vals.size() == type.numelements()*type.aggregate)
            return OIIO::attribute (name, type, &vals[0]);
        return false;
    }
    if (type.basetype == TypeDesc::STRING) {
        std::vector<std::string> vals;
        py_to_stdvector (vals, obj);
        if (vals.size() == type.numelements()*type.aggregate) {
            std::vector<ustring> u;
            for (size_t i = 0, e = vals.size(); i < e; ++i)
                u.push_back (ustring(vals[i]));
            return OIIO::attribute (name, type, &u[0]);
        }
        return false;
    }
    return false;
}
bool
ImageInput::read_scanline (int y, int z, TypeDesc format, void *data,
                           stride_t xstride)
{
    // native_pixel_bytes is the size of a pixel in the FILE, including
    // the per-channel format.
    stride_t native_pixel_bytes = (stride_t) m_spec.pixel_bytes (true);
    // perchanfile is true if the file has different per-channel formats
    bool perchanfile = m_spec.channelformats.size();
    // native_data is true if the user is asking for data in the native format
    bool native_data = (format == TypeDesc::UNKNOWN ||
                        (format == m_spec.format && !perchanfile));
    if (native_data && xstride == AutoStride)
        xstride = native_pixel_bytes;
    else
        m_spec.auto_stride (xstride, format, m_spec.nchannels);
    // Do the strides indicate that the data area is contiguous?
    bool contiguous = (native_data && xstride == native_pixel_bytes) ||
                      (!native_data && xstride == (stride_t)m_spec.pixel_bytes(false));

    // If user's format and strides are set up to accept the native data
    // layout, read the scanline directly into the user's buffer.
    if (native_data && contiguous)
        return read_native_scanline (y, z, data);

    // Complex case -- either changing data type or stride
    int scanline_values = m_spec.width * m_spec.nchannels;
    unsigned char *buf = (unsigned char *) alloca (m_spec.scanline_bytes(true));
    bool ok = read_native_scanline (y, z, buf);
    if (! ok)
        return false;
    if (! perchanfile) {
        // No per-channel formats -- do the conversion in one shot
        ok = contiguous
            ? convert_types (m_spec.format, buf, format, data, scanline_values)
            : convert_image (m_spec.nchannels, m_spec.width, 1, 1,
                             buf, m_spec.format, AutoStride, AutoStride,
                             AutoStride, data, format, xstride,
                             AutoStride, AutoStride);
    } else {
        // Per-channel formats -- have to convert/copy channels individually
        ASSERT (m_spec.channelformats.size() == (size_t)m_spec.nchannels);
        size_t offset = 0;
        for (int c = 0;  ok && c < m_spec.nchannels;  ++c) {
            TypeDesc chanformat = m_spec.channelformats[c];
            ok = convert_image (1 /* channels */, m_spec.width, 1, 1,
                                buf+offset, chanformat,
                                native_pixel_bytes, AutoStride, AutoStride,
                                (char *)data + c*format.size(), format,
                                xstride, AutoStride, AutoStride);
            offset += chanformat.size ();
        }
    }

    if (! ok)
        error ("ImageInput::read_scanline : no support for format %s",
               m_spec.format.c_str());
    return ok;
}
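// Usage sketch (illustrative, not from the original source): read every
// scanline of a flat scanline file as float using the classic raw-pointer
// ImageInput factory API that this code base uses.  The helper name and the
// file handling around it are assumptions for illustration.
static bool
example_read_all_scanlines (const std::string &filename,
                            std::vector<float> &pixels)
{
    ImageInput *in = ImageInput::open (filename);
    if (! in)
        return false;                        // could not open the file
    const ImageSpec &spec = in->spec();
    pixels.resize ((size_t)spec.width * spec.height * spec.nchannels);
    bool ok = true;
    for (int y = spec.y;  ok && y < spec.y + spec.height;  ++y) {
        // read_scanline converts from the file's native format to FLOAT
        float *dst = &pixels[(size_t)(y - spec.y) * spec.width * spec.nchannels];
        ok = in->read_scanline (y, 0, TypeDesc::FLOAT, dst, AutoStride);
    }
    in->close ();
    delete in;
    return ok;
}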
void
OSOReaderToMaster::symbol (SymType symtype, TypeSpec typespec, const char *name_)
{
    ustring name(name_);
    Symbol sym (name, typespec, symtype);
    TypeDesc t = typespec.simpletype();
    int nvals = t.aggregate * (t.is_unsized_array() ? 1 : t.numelements());
    if (sym.symtype() == SymTypeParam || sym.symtype() == SymTypeOutputParam) {
        // Skip structs for now, they're just placeholders
        if (typespec.is_structure()) {
        } else if (typespec.simpletype().basetype == TypeDesc::FLOAT) {
            sym.dataoffset ((int) m_master->m_fdefaults.size());
            expand (m_master->m_fdefaults, nvals);
        } else if (typespec.simpletype().basetype == TypeDesc::INT) {
            sym.dataoffset ((int) m_master->m_idefaults.size());
            expand (m_master->m_idefaults, nvals);
        } else if (typespec.simpletype().basetype == TypeDesc::STRING) {
            sym.dataoffset ((int) m_master->m_sdefaults.size());
            expand (m_master->m_sdefaults, nvals);
        } else if (typespec.is_closure_based()) {
            // Closures are pointers, so we allocate a string default taking
            // advantage of their default being NULL as well.
            sym.dataoffset ((int) m_master->m_sdefaults.size());
            expand (m_master->m_sdefaults, nvals);
        } else {
            ASSERT (0 && "unexpected type");
        }
    }
    if (sym.symtype() == SymTypeConst) {
        if (typespec.simpletype().basetype == TypeDesc::FLOAT) {
            sym.dataoffset ((int) m_master->m_fconsts.size());
            expand (m_master->m_fconsts, nvals);
        } else if (typespec.simpletype().basetype == TypeDesc::INT) {
            sym.dataoffset ((int) m_master->m_iconsts.size());
            expand (m_master->m_iconsts, nvals);
        } else if (typespec.simpletype().basetype == TypeDesc::STRING) {
            sym.dataoffset ((int) m_master->m_sconsts.size());
            expand (m_master->m_sconsts, nvals);
        } else {
            ASSERT (0 && "unexpected type");
        }
    }
#if 0
    // FIXME -- global_heap_offset is quite broken.  But also not necessary.
    // We may need to fix this later.
    if (sym.symtype() == SymTypeGlobal) {
        sym.dataoffset (m_shadingsys.global_heap_offset (sym.name()));
    }
#endif
    sym.lockgeom (m_shadingsys.lockgeom_default());
    m_master->m_symbols.push_back (sym);
    m_symmap[name] = int(m_master->m_symbols.size()) - 1;
    // Start the index at which we add specified defaults
    m_sym_default_index = 0;
}
Class_Handle type_info_get_class(Type_Info_Handle tih)
{
    TypeDesc* td = (TypeDesc*)tih;
    assert(td);
    Class* c = td->load_type_desc();
    if (!c) return NULL;
    if (!c->verify(VM_Global_State::loader_env)) return NULL;
    if (!c->prepare(VM_Global_State::loader_env)) return NULL;
    return c;
} //type_info_get_class
bool
ImageInput::read_tile (int x, int y, int z, TypeDesc format, void *data,
                       stride_t xstride, stride_t ystride, stride_t zstride)
{
    stride_t native_pixel_bytes = (stride_t) m_spec.pixel_bytes (true);
    if (format == TypeDesc::UNKNOWN && xstride == AutoStride)
        xstride = native_pixel_bytes;
    m_spec.auto_stride (xstride, ystride, zstride, format, m_spec.nchannels,
                        m_spec.tile_width, m_spec.tile_height);
    bool contiguous = (xstride == native_pixel_bytes &&
                       ystride == xstride*m_spec.tile_width &&
                       (zstride == ystride*m_spec.tile_height || zstride == 0));

    // If user's format and strides are set up to accept the native data
    // layout, read the tile directly into the user's buffer.
    bool rightformat = (format == TypeDesc::UNKNOWN)
        || (format == m_spec.format && m_spec.channelformats.empty());
    if (rightformat && contiguous)
        return read_native_tile (x, y, z, data);  // Simple case

    // Complex case -- either changing data type or stride
    int tile_values = m_spec.tile_width * m_spec.tile_height *
                      std::max(1,m_spec.tile_depth) * m_spec.nchannels;

    boost::scoped_array<char> buf (new char [m_spec.tile_bytes(true)]);
    bool ok = read_native_tile (x, y, z, &buf[0]);
    if (! ok)
        return false;
    if (m_spec.channelformats.empty()) {
        // No per-channel formats -- do the conversion in one shot
        ok = contiguous
            ? convert_types (m_spec.format, &buf[0], format, data, tile_values)
            : convert_image (m_spec.nchannels, m_spec.tile_width,
                             m_spec.tile_height, m_spec.tile_depth,
                             &buf[0], m_spec.format, AutoStride, AutoStride,
                             AutoStride, data, format,
                             xstride, ystride, zstride);
    } else {
        // Per-channel formats -- have to convert/copy channels individually
        size_t offset = 0;
        for (size_t c = 0;  c < m_spec.channelformats.size();  ++c) {
            TypeDesc chanformat = m_spec.channelformats[c];
            ok = convert_image (1 /* channels */, m_spec.tile_width,
                                m_spec.tile_height, m_spec.tile_depth,
                                &buf[offset], chanformat,
                                native_pixel_bytes, AutoStride, AutoStride,
                                (char *)data + c*m_spec.format.size(), format,
                                xstride, AutoStride, AutoStride);
            offset += chanformat.size ();
        }
    }

    if (! ok)
        error ("ImageInput::read_tile : no support for format %s",
               m_spec.format.c_str());
    return ok;
}
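// Usage sketch (illustrative, not from the original source): read one tile of
// an already-opened tiled file into a float buffer sized for a whole tile.
// The helper name and the tile-index arguments are assumptions.
static bool
example_read_one_tile (ImageInput *in, int xtile, int ytile,
                       std::vector<float> &tilepixels)
{
    const ImageSpec &spec = in->spec();
    if (! spec.tile_width)
        return false;                        // not a tiled file
    tilepixels.resize ((size_t)spec.tile_pixels() * spec.nchannels);
    // (x,y) are the pixel coordinates of the tile's upper-left corner
    int x = spec.x + xtile * spec.tile_width;
    int y = spec.y + ytile * spec.tile_height;
    return in->read_tile (x, y, spec.z, TypeDesc::FLOAT, &tilepixels[0],
                          AutoStride, AutoStride, AutoStride);
}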
TypeDesc* type_desc_create_from_java_descriptor(const char* d, ClassLoader* loader)
{
    Global_Env* env = VM_Global_State::loader_env;
    switch (*d) {
    case 'B': return env->bootstrap_class_loader->get_primitive_type(K_S1);
    case 'C': return env->bootstrap_class_loader->get_primitive_type(K_Char);
    case 'D': return env->bootstrap_class_loader->get_primitive_type(K_F8);
    case 'F': return env->bootstrap_class_loader->get_primitive_type(K_F4);
    case 'I': return env->bootstrap_class_loader->get_primitive_type(K_S4);
    case 'J': return env->bootstrap_class_loader->get_primitive_type(K_S8);
    case 'S': return env->bootstrap_class_loader->get_primitive_type(K_S2);
    case 'Z': return env->bootstrap_class_loader->get_primitive_type(K_Boolean);
    case 'V': return env->bootstrap_class_loader->get_primitive_type(K_Void);
    case 'L':
        {
            const char* sn = d+1;
            const char* en = sn;
            while (en[0] != ';') {
                en++;
            }
            unsigned len = (unsigned)(en-sn);
            String* str = env->string_pool.lookup(sn, len);
            assert(loader);
            loader->LockTypesCache();
            TypeDesc** tdres = loader->GetJavaTypes()->Lookup(str);
            if (tdres) {
                assert(*tdres);
                loader->UnlockTypesCache();
                return *tdres;
            }
            TypeDesc* td = new TypeDesc(K_Object, NULL, NULL, str, loader, NULL);
            assert(td);
            loader->GetJavaTypes()->Insert(str, td);
            loader->UnlockTypesCache();
            return td;
        }
    case '[':
        {
            // descriptor is checked in recursion
            TypeDesc* et = type_desc_create_from_java_descriptor(d+1, loader);
            if (!et) {
                return NULL;
            }
            return et->type_desc_create_vector();
        }
    default:
        DIE(("Bad type descriptor"));
        return NULL;
    }
}
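// Illustration (not from the original source): the JVM descriptor strings this
// parser accepts and the kinds they map to.  The 'loader' argument stands for
// a class loader supplied by the caller and is assumed for illustration.
//
//   type_desc_create_from_java_descriptor("I", loader);                  // K_S4 (int)
//   type_desc_create_from_java_descriptor("Ljava/lang/String;", loader); // K_Object, cached per loader
//   type_desc_create_from_java_descriptor("[[D", loader);                // K_Vector of K_Vector of K_F8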
bool
SimpleRenderer::get_camera_shutter_close (ShaderGlobals *sg, bool derivs,
                                          ustring object, TypeDesc type,
                                          ustring name, void *val)
{
    if (type == TypeDesc::TypeFloat) {
        ((float *)val)[0] = m_shutter[1];
        if (derivs)
            memset ((char *)val+type.size(), 0, 2*type.size());
        return true;
    }
    return false;
}
bool
SimpleRenderer::get_camera_clip (ShaderGlobals *sg, bool derivs,
                                 ustring object, TypeDesc type,
                                 ustring name, void *val)
{
    if (type == TypeFloatArray2) {
        ((float *)val)[0] = m_hither;
        ((float *)val)[1] = m_yon;
        if (derivs)
            memset ((char *)val+type.size(), 0, 2*type.size());
        return true;
    }
    return false;
}
bool
SimpleRenderer::get_camera_fov (ShaderGlobals *sg, bool derivs,
                                ustring object, TypeDesc type,
                                ustring name, void *val)
{
    // N.B. in a real renderer, this may be time-dependent
    if (type == TypeDesc::TypeFloat) {
        ((float *)val)[0] = m_fov;
        if (derivs)
            memset ((char *)val+type.size(), 0, 2*type.size());
        return true;
    }
    return false;
}
bool
ImageOutput::copy_to_image_buffer (int xbegin, int xend, int ybegin, int yend,
                                   int zbegin, int zend, TypeDesc format,
                                   const void *data, stride_t xstride,
                                   stride_t ystride, stride_t zstride,
                                   void *image_buffer, TypeDesc buf_format)
{
    const ImageSpec &spec (this->spec());
    if (buf_format == TypeDesc::UNKNOWN)
        buf_format = spec.format;
    spec.auto_stride (xstride, ystride, zstride, format, spec.nchannels,
                      spec.width, spec.height);
    stride_t buf_xstride = spec.nchannels * buf_format.size();
    stride_t buf_ystride = buf_xstride * spec.width;
    stride_t buf_zstride = buf_ystride * spec.height;
    stride_t offset = (xbegin-spec.x)*buf_xstride
                    + (ybegin-spec.y)*buf_ystride
                    + (zbegin-spec.z)*buf_zstride;
    int width = xend-xbegin, height = yend-ybegin, depth = zend-zbegin;
    imagesize_t npixels = imagesize_t(width) * imagesize_t(height)
                        * imagesize_t(depth);

    // Add dither if requested -- requires making a temporary staging area
    boost::scoped_array<float> ditherarea;
    unsigned int dither = spec.get_int_attribute ("oiio:dither", 0);
    if (dither && format.is_floating_point() &&
            buf_format.basetype == TypeDesc::UINT8) {
        stride_t pixelsize = spec.nchannels * sizeof(float);
        ditherarea.reset (new float [pixelsize * npixels]);
        OIIO::convert_image (spec.nchannels, width, height, depth,
                             data, format, xstride, ystride, zstride,
                             ditherarea.get(), TypeDesc::FLOAT,
                             pixelsize, pixelsize*width,
                             pixelsize*width*height);
        data = ditherarea.get();
        format = TypeDesc::FLOAT;
        xstride = pixelsize;
        ystride = xstride * width;
        zstride = ystride * height;
        float ditheramp = spec.get_float_attribute ("oiio:ditheramplitude",
                                                    1.0f/255.0f);
        OIIO::add_dither (spec.nchannels, width, height, depth, (float *)data,
                          pixelsize, pixelsize*width, pixelsize*width*height,
                          ditheramp, spec.alpha_channel, spec.z_channel,
                          dither, 0, xbegin, ybegin, zbegin);
    }

    return OIIO::convert_image (spec.nchannels, width, height, depth,
                                data, format, xstride, ystride, zstride,
                                (char *)image_buffer + offset, buf_format,
                                buf_xstride, buf_ystride, buf_zstride);
}
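// Usage sketch (illustrative, not from the original source): the dither path
// above is driven by the "oiio:dither" attribute on the output ImageSpec, for
// example when writing float data to an 8-bit file.  The helper name, file
// name, and pixel source are assumptions.
static bool
example_write_dithered_uint8 (const float *pixels, int xres, int yres,
                              int nchannels)
{
    ImageOutput *out = ImageOutput::create ("out.tif");
    if (! out)
        return false;
    ImageSpec spec (xres, yres, nchannels, TypeDesc::UINT8);
    spec.attribute ("oiio:dither", 1);       // request dither on quantization
    bool ok = out->open ("out.tif", spec) &&
              out->write_image (TypeDesc::FLOAT, pixels,
                                AutoStride, AutoStride, AutoStride);
    ok &= out->close ();
    delete out;
    return ok;
}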
BOOLEAN type_info_is_resolved(Type_Info_Handle tih)
{
    TypeDesc* td = (TypeDesc*)tih;
    switch (td->get_kind()) {
    case K_Vector:
        if (td->get_element_type()->is_primitive()) {
            return true;
        }
        return type_info_is_resolved(td->get_element_type());
    case K_Object:
        return td->is_loaded();
    default:
        LDIE(73, "Unexpected kind");
        return 0;
    }
}
U_32 type_info_get_num_array_dimensions(Type_Info_Handle tih)
{
    TypeDesc* td = (TypeDesc*)tih;
    if (td->get_kind() == K_Vector) {
        const String* name = td->get_type_name();
        U_32 res = 0;
        if (name == NULL) {
            res = 1 + type_info_get_num_array_dimensions(td->get_element_type());
        } else {
            res = countLeadingChars(name->bytes, '[');
        }
        assert(res <= 255);
        return res;
    }
    return 0;
}
bool
SimpleRenderer::get_camera_screen_window (ShaderGlobals *sg, bool derivs,
                                          ustring object, TypeDesc type,
                                          ustring name, void *val)
{
    // N.B. in a real renderer, this may be time-dependent
    if (type == TypeFloatArray4) {
        ((float *)val)[0] = m_screen_window[0];
        ((float *)val)[1] = m_screen_window[1];
        ((float *)val)[2] = m_screen_window[2];
        ((float *)val)[3] = m_screen_window[3];
        if (derivs)
            memset ((char *)val+type.size(), 0, 2*type.size());
        return true;
    }
    return false;
}
Type_Info_Handle type_info_get_type_info(Type_Info_Handle tih)
{
    TypeDesc* td = (TypeDesc*)tih;
    assert(td);
    switch (td->get_kind()) {
    case K_Vector:
    case K_Array:
        return td->get_element_type();
    case K_ManagedPointer:
    case K_UnmanagedPointer:
        return td->get_pointed_to_type();
    default:
        LDIE(73, "Unexpected kind");
        return 0;
    }
} //type_info_get_type_info
object
ImageInputWrap::read_tiles (int xbegin, int xend, int ybegin, int yend,
                            int zbegin, int zend, int chbegin, int chend,
                            TypeDesc format)
{
    // Allocate our own temp buffer and try to read the tiles into it.
    // If the read fails, return None.
    const ImageSpec &spec = m_input->spec();
    if (format.basetype == TypeDesc::UNKNOWN)
        format = spec.format;
    chend = clamp (chend, chbegin+1, spec.nchannels);
    int nchans = chend - chbegin;
    size_t size = (size_t) ((xend-xbegin) * (yend-ybegin) * (zend-zbegin)
                            * nchans * format.size());
    char *data = new char[size];
    bool ok;
    {
        ScopedGILRelease gil;
        ok = m_input->read_tiles (xbegin, xend, ybegin, yend, zbegin, zend,
                                  chbegin, chend, format, data);
    }
    if (! ok) {
        delete [] data;  // never mind
        return object(handle<>(Py_None));
    }
    object array = C_array_to_Python_array (data, format, size);
    // clean up and return the array handle
    delete [] data;
    return array;
}
py::object
ImageCacheWrap::get_pixels (const std::string &filename_, int subimage,
                            int miplevel, int xbegin, int xend, int ybegin,
                            int yend, int zbegin, int zend, TypeDesc datatype)
{
    ustring filename (filename_);
    if (datatype == TypeUnknown)
        datatype = TypeFloat;
    int chbegin = 0, chend = 0;
    if (! m_cache->get_image_info (filename, subimage, miplevel,
                                   ustring("channels"), TypeDesc::INT, &chend))
        return py::none();   // couldn't open file
    size_t size = size_t ((xend-xbegin) * (yend-ybegin) * (zend-zbegin)
                          * (chend-chbegin) * datatype.size());
    std::unique_ptr<char[]> data (new char [size]);
    bool ok;
    {
        py::gil_scoped_release gil;
        ok = m_cache->get_pixels (filename, subimage, miplevel,
                                  xbegin, xend, ybegin, yend, zbegin, zend,
                                  datatype, data.get());
    }
    if (ok)
        return make_numpy_array (datatype, data.release(),
                                 (zend-zbegin) > 1 ? 4 : 3,
                                 chend-chbegin, xend-xbegin,
                                 yend-ybegin, zend-zbegin);
    else
        return py::none();
}
object
ImageInputWrap::read_tile (int x, int y, int z, TypeDesc format)
{
    // Allocate our own temp buffer and try to read the tile into it.
    // If the read fails, return None.
    const ImageSpec &spec = m_input->spec();
    if (format.basetype == TypeDesc::UNKNOWN)
        format = spec.format;
    size_t size = (size_t) spec.tile_pixels() * spec.nchannels * format.size();
    char *data = new char[size];
    bool ok;
    {
        ScopedGILRelease gil;
        ok = m_input->read_tile (x, y, z, format, data);
    }
    if (! ok) {
        delete [] data;  // never mind
        return object(handle<>(Py_None));
    }
    object array = C_array_to_Python_array (data, format, size);
    // clean up and return the array handle
    delete [] data;
    return array;
}
bool
convert_types (TypeDesc src_type, const void *src,
               TypeDesc dst_type, void *dst, int n)
{
    // If no conversion is necessary, just memcpy
    if (src_type == dst_type || dst_type.basetype == TypeDesc::UNKNOWN) {
        memcpy (dst, src, n * src_type.size());
        return true;
    }

    if (dst_type == TypeDesc::TypeFloat) {
        // Special case -- converting non-float to float
        pvt::convert_to_float (src, (float *)dst, n, src_type);
        return true;
    }

    // Conversion is to a non-float type

    boost::scoped_array<float> tmp;   // In case we need a lot of temp space
    float *buf = (float *)src;
    if (src_type != TypeDesc::TypeFloat) {
        // If src is also not float, convert through an intermediate buffer
        if (n <= 4096)  // If <= 16 KB, use the stack
            buf = ALLOCA (float, n);
        else {
            tmp.reset (new float[n]);  // Freed when tmp exits its scope
            buf = tmp.get();
        }
        pvt::convert_to_float (src, buf, n, src_type);
    }
void
BackendLLVM::llvm_generate_debug_uninit (const Opcode &op)
{
    for (int i = 0;  i < op.nargs();  ++i) {
        Symbol &sym (*opargsym (op, i));
        if (! op.argread(i))
            continue;
        if (sym.typespec().is_closure_based())
            continue;
        TypeDesc t = sym.typespec().simpletype();
        if (t.basetype != TypeDesc::FLOAT && t.basetype != TypeDesc::INT &&
            t.basetype != TypeDesc::STRING)
            continue;  // just check float, int, string based types
        llvm::Value *ncheck = ll.constant (int(t.numelements() * t.aggregate));
        llvm::Value *offset = ll.constant(0);
        // Some special cases...
        if (op.opname() == Strings::op_for && i == 0) {
            // The first argument of 'for' is the condition temp, but
            // note that it may not have had its initializer run yet, so
            // don't generate uninit test code for it.
            continue;
        }
        if (op.opname() == op_aref && i == 1) {
            // Special case -- array reference -- only check one element
            llvm::Value *ind = llvm_load_value (*opargsym (op, 2));
            llvm::Value *agg = ll.constant(t.aggregate);
            offset = t.aggregate == 1 ? ind : ll.op_mul (ind, agg);
            ncheck = agg;
        } else if (op.opname() == op_compref && i == 1) {
            // Special case -- component reference -- only check one channel
            llvm::Value *ind = llvm_load_value (*opargsym (op, 2));
            offset = ind;
            ncheck = ll.constant(1);
        }

        llvm::Value *args[] = { ll.constant(t),
                                llvm_void_ptr(sym),
                                sg_void_ptr(),
                                ll.constant(op.sourcefile()),
                                ll.constant(op.sourceline()),
                                ll.constant(sym.name()),
                                offset,
                                ncheck };
        ll.call_function ("osl_uninit_check", args, 8);
    }
}
bool
ImageInput::read_image (int chbegin, int chend, TypeDesc format, void *data,
                        stride_t xstride, stride_t ystride, stride_t zstride,
                        ProgressCallback progress_callback,
                        void *progress_callback_data)
{
    if (chend < 0)
        chend = m_spec.nchannels;
    chend = clamp (chend, chbegin+1, m_spec.nchannels);
    int nchans = chend - chbegin;
    bool native = (format == TypeDesc::UNKNOWN);
    stride_t pixel_bytes = native
        ? (stride_t) m_spec.pixel_bytes (chbegin, chend, native)
        : (stride_t) (format.size() * nchans);
    if (native && xstride == AutoStride)
        xstride = pixel_bytes;
    m_spec.auto_stride (xstride, ystride, zstride, format, nchans,
                        m_spec.width, m_spec.height);
    bool ok = true;
    if (progress_callback)
        if (progress_callback (progress_callback_data, 0.0f))
            return ok;
    if (m_spec.tile_width) {
        // Tiled image
        for (int z = 0;  z < m_spec.depth;  z += m_spec.tile_depth) {
            for (int y = 0;  y < m_spec.height && ok;  y += m_spec.tile_height) {
                ok &= read_tiles (m_spec.x, m_spec.x+m_spec.width,
                                  y+m_spec.y,
                                  std::min (y+m_spec.y+m_spec.tile_height,
                                            m_spec.y+m_spec.height),
                                  z+m_spec.z,
                                  std::min (z+m_spec.z+m_spec.tile_depth,
                                            m_spec.z+m_spec.depth),
                                  chbegin, chend, format,
                                  (char *)data + z*zstride + y*ystride,
                                  xstride, ystride, zstride);
                if (progress_callback &&
                    progress_callback (progress_callback_data,
                                       (float)y/m_spec.height))
                    return ok;
            }
        }
    } else {
        // Scanline image -- rely on read_scanlines, in chunks of oiio_read_chunk
        int read_chunk = oiio_read_chunk;
        if (! read_chunk) {
            read_chunk = m_spec.height;
        }
        for (int z = 0;  z < m_spec.depth;  ++z)
            for (int y = 0;  y < m_spec.height && ok;  y += read_chunk) {
                int yend = std::min (y+m_spec.y+read_chunk,
                                     m_spec.y+m_spec.height);
                ok &= read_scanlines (y+m_spec.y, yend, z+m_spec.z,
                                      chbegin, chend, format,
                                      (char *)data + z*zstride + y*ystride,
                                      xstride, ystride);
                if (progress_callback)
                    if (progress_callback (progress_callback_data,
                                           (float)y/m_spec.height))
                        return ok;
            }
    }
    if (progress_callback)
        progress_callback (progress_callback_data, 1.0f);
    return ok;
}
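// Usage sketch (illustrative, not from the original source): read an entire
// image into a contiguous float buffer with one call, letting read_image()
// choose between the tiled and scanline paths above.  The helper name and the
// already-opened ImageInput pointer are assumptions.
static bool
example_read_whole_image (ImageInput *in, std::vector<float> &pixels)
{
    const ImageSpec &spec = in->spec();
    pixels.resize ((size_t)spec.width * spec.height *
                   std::max (1, spec.depth) * spec.nchannels);
    // All channels, contiguous float layout, no progress callback
    return in->read_image (0 /*chbegin*/, spec.nchannels /*chend*/,
                           TypeDesc::FLOAT, &pixels[0],
                           AutoStride, AutoStride, AutoStride, NULL, NULL);
}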
bool
ImageBuf::copy_pixels (int xbegin, int xend, int ybegin, int yend,
                       TypeDesc format, void *result) const
{
#if 1
    // Fancy method -- for each possible base type that the user
    // wants for a destination type, call a template specialization.
    switch (format.basetype) {
    case TypeDesc::UINT8 :
        copy_pixels<unsigned char> (xbegin, xend, ybegin, yend,
                                    (unsigned char *)result);
        break;
    case TypeDesc::INT8:
        copy_pixels<char> (xbegin, xend, ybegin, yend, (char *)result);
        break;
    case TypeDesc::UINT16 :
        copy_pixels<unsigned short> (xbegin, xend, ybegin, yend,
                                     (unsigned short *)result);
        break;
    case TypeDesc::INT16 :
        copy_pixels<short> (xbegin, xend, ybegin, yend, (short *)result);
        break;
    case TypeDesc::UINT :
        copy_pixels<unsigned int> (xbegin, xend, ybegin, yend,
                                   (unsigned int *)result);
        break;
    case TypeDesc::INT :
        copy_pixels<int> (xbegin, xend, ybegin, yend, (int *)result);
        break;
    case TypeDesc::HALF :
        copy_pixels<half> (xbegin, xend, ybegin, yend, (half *)result);
        break;
    case TypeDesc::FLOAT :
        copy_pixels<float> (xbegin, xend, ybegin, yend, (float *)result);
        break;
    case TypeDesc::DOUBLE :
        copy_pixels<double> (xbegin, xend, ybegin, yend, (double *)result);
        break;
    case TypeDesc::UINT64 :
        copy_pixels<unsigned long long> (xbegin, xend, ybegin, yend,
                                         (unsigned long long *)result);
        break;
    case TypeDesc::INT64 :
        copy_pixels<long long> (xbegin, xend, ybegin, yend,
                                (long long *)result);
        break;
    default:
        return false;
    }
#else
    // Naive method -- loop over pixels, calling getpixel()
    size_t usersize = format.size() * nchannels();
    float *pel = (float *) alloca (nchannels() * sizeof(float));
    for (int y = ybegin;  y < yend;  ++y)
        for (int x = xbegin;  x < xend;  ++x) {
            getpixel (x, y, pel);
            convert_types (TypeDesc::TypeFloat, pel, format, result,
                           nchannels());
            result = (void *) ((char *)result + usersize);
        }
#endif
    return true;
}
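// Usage sketch (illustrative, not from the original source): copy a
// rectangular region of an ImageBuf into a caller-owned uint16 buffer, letting
// copy_pixels() do the type conversion.  The helper name and region arguments
// are assumptions.
static bool
example_copy_region_uint16 (const ImageBuf &buf,
                            int xbegin, int xend, int ybegin, int yend,
                            std::vector<unsigned short> &result)
{
    result.resize ((size_t)(xend-xbegin) * (yend-ybegin) * buf.nchannels());
    return buf.copy_pixels (xbegin, xend, ybegin, yend,
                            TypeDesc::UINT16, &result[0]);
}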
inline std::string
print_vals (const Symbol &s)
{
    std::stringstream out;
    TypeDesc t = s.typespec().simpletype();
    int n = t.aggregate * t.numelements();
    if (t.basetype == TypeDesc::FLOAT) {
        for (int j = 0;  j < n;  ++j)
            out << (j ? " " : "") << ((float *)s.data())[j];
    } else if (t.basetype == TypeDesc::INT) {
        for (int j = 0;  j < n;  ++j)
            out << (j ? " " : "") << ((int *)s.data())[j];
    } else if (t.basetype == TypeDesc::STRING) {
        for (int j = 0;  j < n;  ++j)
            out << (j ? " " : "") << "\"" << ((ustring *)s.data())[j] << "\"";
    }
    return out.str();
}
void
BackendLLVM::llvm_generate_debugnan (const Opcode &op)
{
    for (int i = 0;  i < op.nargs();  ++i) {
        Symbol &sym (*opargsym (op, i));
        if (! op.argwrite(i))
            continue;
        TypeDesc t = sym.typespec().simpletype();
        if (t.basetype != TypeDesc::FLOAT)
            continue;  // just check float-based types
        llvm::Value *ncomps = ll.constant (int(t.numelements() * t.aggregate));
        llvm::Value *offset = ll.constant(0);
        llvm::Value *ncheck = ncomps;
        if (op.opname() == op_aassign) {
            // Special case -- array assignment -- only check one element
            ASSERT (i == 0 && "only arg 0 is written for aassign");
            llvm::Value *ind = llvm_load_value (*opargsym (op, 1));
            llvm::Value *agg = ll.constant(t.aggregate);
            offset = t.aggregate == 1 ? ind : ll.op_mul (ind, agg);
            ncheck = agg;
        } else if (op.opname() == op_compassign) {
            // Special case -- component assignment -- only check one channel
            ASSERT (i == 0 && "only arg 0 is written for compassign");
            llvm::Value *ind = llvm_load_value (*opargsym (op, 1));
            offset = ind;
            ncheck = ll.constant(1);
        }

        llvm::Value *args[] = { ncomps,
                                llvm_void_ptr(sym),
                                ll.constant((int)sym.has_derivs()),
                                sg_void_ptr(),
                                ll.constant(op.sourcefile()),
                                ll.constant(op.sourceline()),
                                ll.constant(sym.name()),
                                offset,
                                ncheck,
                                ll.constant(op.opname()) };
        ll.call_function ("osl_naninf_check", args, 10);
    }
}
bool
ImageOutputWrap::write_scanline (int y, int z, TypeDesc format, object &buffer,
                                 stride_t xstride)
{
    bool native = (format == TypeDesc::UNKNOWN);
    imagesize_t size = native
        ? m_output->spec().scanline_bytes (native)
        : format.size() * m_output->spec().nchannels * m_output->spec().width;
    const void *array = make_read_buffer (buffer, size);
    ScopedGILRelease gil;
    return m_output->write_scanline (y, z, format, array, xstride);
}
void
RuntimeOptimizer::llvm_generate_debugnan (const Opcode &op)
{
    for (int i = 0;  i < op.nargs();  ++i) {
        Symbol &sym (*opargsym (op, i));
        if (! op.argwrite(i))
            continue;
        TypeDesc t = sym.typespec().simpletype();
        if (t.basetype != TypeDesc::FLOAT)
            continue;  // just check float-based types
        int ncomps = t.numelements() * t.aggregate;
        llvm::Value *args[] = { llvm_constant(ncomps),
                                llvm_void_ptr(sym),
                                llvm_constant((int)sym.has_derivs()),
                                sg_void_ptr(),
                                llvm_constant(op.sourcefile()),
                                llvm_constant(op.sourceline()),
                                llvm_constant(sym.name()) };
        llvm_call_function ("osl_naninf_check", args, 7);
    }
}
template <class T>
static void
parse_elements (const std::string &name, TypeDesc type,
                const std::string &type_code, const std::string &elements,
                int num_elements, ImageIOParameter &param)
{
    void *data = new T[num_elements];
    char *data_ptr = (char *) data;
    size_t element_size = type.elementtype().elementsize ();
    boost::char_separator<char> sep (", ");
    boost::tokenizer<boost::char_separator<char> > tokens (elements, sep);
    BOOST_FOREACH (std::string element, tokens) {
        // Parse each comma-separated element with the scanf-style type code,
        // then advance to the next slot in the destination array.
        sscanf (element.c_str (), type_code.c_str (), (T *)data_ptr);
        data_ptr += element_size;
    }
bool
ImageOutputWrap::write_image (TypeDesc format, object &buffer,
                              stride_t xstride, stride_t ystride,
                              stride_t zstride)
{
    bool native = (format == TypeDesc::UNKNOWN);
    imagesize_t size = native
        ? m_output->spec().image_bytes (native)
        : format.size() * m_output->spec().nchannels
              * m_output->spec().image_pixels();
    const void *array = make_read_buffer (buffer, size);
    ScopedGILRelease gil;
    if (array)
        return m_output->write_image (format, array, xstride, ystride, zstride);
    return false;
}