int coda_hdf4_cursor_read_string(const coda_cursor *cursor, char *dst, long dst_size) { if (((coda_hdf4_type *)cursor->stack[cursor->n - 1].type)->tag == tag_hdf4_string) { long length; if (coda_hdf4_cursor_get_string_length(cursor, &length) != 0) { return -1; } if (dst_size >= length + 1) { /* we can directly read into the destination buffer */ if (read_attribute(cursor, dst, length) != 0) { return -1; } dst[length] = '\0'; } else { char *buffer; /* we first read the whole string and then return only the requested part */ buffer = malloc(length + 1); /* add 1, because the AN interface depend on this additional byte */ if (buffer == NULL) { coda_set_error(CODA_ERROR_OUT_OF_MEMORY, "out of memory (could not allocate %lu bytes) (%s:%u)", length + 1, __FILE__, __LINE__); return -1; } if (read_attribute(cursor, buffer, length) != 0) { free(buffer); return -1; } memcpy(dst, buffer, dst_size - 1); free(buffer); dst[dst_size - 1] = '\0'; } } else { /* basic type should be a single character, so the string length is always 1 */ if (dst_size > 1) { if (coda_hdf4_cursor_read_char(cursor, dst) != 0) { return -1; } dst[1] = '\0'; } else if (dst_size == 1) { dst[0] = '\0'; } } return 0; }
/*!
    Opens an HDF5 compartment report.

    The data source of the report specification must be a directory holding
    one HDF5 file per cell, named a<GID>.h5.  The first file whose name
    matches the a[0-9]+ pattern is opened to cross-check the stored report
    metadata (tstart, tstop, Dt) against the specification.

    Throws IO_Error if the data source is not a directory and Bad_Data if
    no valid .h5 file is found inside it.
*/
Compartment_Report_HDF5_File_Reader::Compartment_Report_HDF5_File_Reader
(const Report_Specification & specs) :
    _path(uri_to_filename(specs.data_source())),
    _report_name(specs.label()),
    _sequence_start_frame(0),
    _frame_counter(0),
    _frame_skip(1.0),
    _current_framestamp(UNDEFINED_FRAME_NUMBER)
{
    namespace fs = boost::filesystem;

    static Report_Specialization_Register<
        Compartment_Report_HDF5_File_Reader, float> register_float_buffer;

    if (!fs::is_directory(_path))
    {
        throw_exception(
            IO_Error("Compartment_Report_HDF5_File_Reader: data source "
                     "is not a directory: " + specs.data_source()),
            FATAL_LEVEL, __FILE__, __LINE__);
    }

    // Finding a suitable cell file (a<GID>.h5) inside the search path.
    fs::directory_iterator entry(_path), end_entry;
    Cell_GID cell_GID = UNDEFINED_CELL_GID;
    while (cell_GID == UNDEFINED_CELL_GID && entry != end_entry)
    {
        fs::path filename = entry->path();
        std::string cell_name = fs::basename(filename);
        if (fs::is_regular(entry->status()) &&
            fs::extension(filename) == ".h5" &&
            // Checking if the name matches the a[0-9]+ pattern
            cell_name.size() > 1 && cell_name[0] == 'a')
        {
            // Parse into a temporary first.  The original code assigned the
            // strtol() result to cell_GID inside the condition, so a name
            // such as "a12foo.h5" ended the search with a bogus GID even
            // though the trailing-garbage test (*endptr == '\0') failed.
            char * endptr;
            const long gid = strtol(cell_name.c_str() + 1, &endptr, 10);
            if (gid > 0 && *endptr == '\0')
                cell_GID = gid;
        }
        ++entry;
    }

    if (cell_GID == UNDEFINED_CELL_GID)
    {
        // Fix: the original message had no separator before the path.
        throw_exception(
            Bad_Data("Compartment_Report_HDF5_File_Reader: source path "
                     "doesn't contain any valid .h5 file: " +
                     specs.data_source()),
            FATAL_LEVEL, __FILE__, __LINE__);
    }

    // Not catching any exception here
    H5ID file, dataset;
    open_data_set(cell_GID, "data", file, dataset);

    float start_time, end_time, delta_time;
    if (// Trying to read attributes
        !read_attribute("tstart", dataset, start_time) ||
        !read_attribute("tstop", dataset, end_time) ||
        !read_attribute("Dt", dataset, delta_time) ||
        // And checking them (note: exact float comparison with the specs)
        start_time != specs.start_time() ||
        end_time != specs.end_time() ||
        delta_time != specs.timestep())
    {
        /*!
           \todo Exception commented because the forward_skip is not taken
           into account in the HDF5. SL - 24.07.08
        throw_exception(
            Bad_Data("Compartment_Report_HDF5_File_Reader: inconsistent"
                     " report metadata found for '" + specs.label() +
                     "' in path " + specs.data_source()),
            FATAL_LEVEL, __FILE__, __LINE__);
        */
    }
}
// Deserialize one expression node from the pickled AST stream.
// Reads the node's type tag, a fixed sanity byte, then dispatches to the
// per-node reader.  A type tag of 0 encodes a null expression.
AST_expr* readASTExpr(BufferedReader* reader) {
    uint8_t node_type = reader->readByte();

    if (VERBOSITY("parsing") >= 3)
        printf("type = %d\n", node_type);

    if (node_type == 0)
        return NULL;

    // Every non-null node is preceded by a fixed check byte; a mismatch
    // means the serializer and this reader are out of sync.
    uint8_t check = reader->readByte();
    assert(check == 0xae);

    switch (node_type) {
        case AST_TYPE::Attribute:
            return read_attribute(reader);
        case AST_TYPE::BinOp:
            return read_binop(reader);
        case AST_TYPE::BoolOp:
            return read_boolop(reader);
        case AST_TYPE::Call:
            return read_call(reader);
        case AST_TYPE::Compare:
            return read_compare(reader);
        case AST_TYPE::Dict:
            return read_dict(reader);
        case AST_TYPE::DictComp:
            return read_dictcomp(reader);
        case AST_TYPE::ExtSlice:
            return read_extslice(reader);
        case AST_TYPE::GeneratorExp:
            return read_generatorexp(reader);
        case AST_TYPE::IfExp:
            return read_ifexp(reader);
        case AST_TYPE::Index:
            return read_index(reader);
        case AST_TYPE::Lambda:
            return read_lambda(reader);
        case AST_TYPE::List:
            return read_list(reader);
        case AST_TYPE::ListComp:
            return read_listcomp(reader);
        case AST_TYPE::Name:
            return read_name(reader);
        case AST_TYPE::Num:
            return read_num(reader);
        case AST_TYPE::Repr:
            return read_repr(reader);
        case AST_TYPE::Set:
            return read_set(reader);
        case AST_TYPE::SetComp:
            return read_setcomp(reader);
        case AST_TYPE::Slice:
            return read_slice(reader);
        case AST_TYPE::Str:
            return read_str(reader);
        case AST_TYPE::Subscript:
            return read_subscript(reader);
        case AST_TYPE::Tuple:
            return read_tuple(reader);
        case AST_TYPE::UnaryOp:
            return read_unaryop(reader);
        case AST_TYPE::Yield:
            return read_yield(reader);
        default:
            fprintf(stderr, "Unknown expr node type (parser.cpp:" STRINGIFY(__LINE__) "): %d\n", node_type);
            abort();
            break;
    }
}
/* Get the next directory entry.
 * Returns (-1)=EOF, (-2)=Again, 0=OK or an error code.
 * *cookie==16 selects the first entry.
 * A pointer to the raw entry is written to *entry.
 * bothnames!=0: list both the long and the 8.3 file name.
 */
static int get_next_direntry(pinode inode,unsigned *cookie,
                             const unsigned char **entry,int bothnames)
{
    int offset,i,idx;

    /* Entry is beyond the directory's end */
    if(((*cookie)>>16)>inode->ntree)
        return (-1);

    /* Ensure the directory's index tree has been read */
    i=load_tree_nodes(inode);
    if(i)
        return i;

    /* Entry lives in the index root (attribute 0x90) */
    if(*cookie<0x10000) {
        offset=(*cookie)-16;
        *entry=find_attribute(inode->mftentry,0x90);
        (*entry)+=0x18+2*(*entry)[9];
        (*entry)+=getlittle32(*entry,16)+16;
    }
    /* Entry lives in an index allocation block (attribute 0xA0) */
    else {
        /* No buffer for index blocks allocated, yet. */
        if(!inode->curidx) {
            inode->curidx=malloc(inode->clusttree);
            if(!inode->curidx)
                return 5;
            inode->ncuridx=(-1);
        }
        /* The currently cached index block is not the required one. */
        idx=(*cookie)>>16;
        if(inode->ncuridx!=idx) {
            inode->ncuridx=(-1);
            i=read_attribute(inode,0xA0,
                             (long long)(inode->treedata[idx-1])*clustlen,
                             inode->curidx,inode->clusttree,0);
            /* Fix: test for a real error first.  read_attribute() reports
             * errors as negative values; the original compared against
             * clusttree before testing i<0, which made the i<0 branch
             * unreachable and masked every error code as 5. */
            if(i<0)
                return -i;
            if(i!=inode->clusttree)
                return 5;
            if(do_fixups(inode->curidx,inode->clusttree))
                return 5;
            /* Fix: remember which block is now cached.  The original never
             * updated ncuridx after a successful load, so the cache test
             * above always failed and every call re-read the block. */
            inode->ncuridx=idx;
        }
        offset=(*cookie)&0xFFFF;
        *entry=inode->curidx+getlittle32(inode->curidx,0x18)+0x18;
    }

    /* Skip to the (offset+1)-th entry */
    for(;offset>0;offset--) {
        /* End of the current index node found */
        if((*entry)[12]&2) {
            (*cookie)+=0x10000;
            (*cookie)&=~0xFFFF;
            return (-2);
        }
        /* Go to the next entry */
        (*entry)+=getlittle16(*entry,8);
    }

    /* The end-of-node index entry does not describe a file. */
    if((*entry)[12]&2) {
        (*cookie)+=0x10000;
        (*cookie)&=~0xFFFF;
        return (-2);
    }

    /* Advance the cursor past this entry */
    (*cookie)++;

    /* Is it a special (system) entry? */
    if(!memcmp((*entry)+1,"\0\0\0\0\0",5))
        if(**entry<16 && **entry!=5)
            return (-2);

    /* Is it an 8.3 file name of a file with a long file name,
     * or a long file name => honour "usedosnames" unless the caller
     * asked for both via bothnames */
    if(!bothnames)
        if(((*entry)[0x51]==2 && !usedosnames) ||
           ((*entry)[0x51]==1 && usedosnames))
            return (-2);

    return 0;
}
/* Recursively search a directory for subitems.
 * Note that a directory is organized as a tree in NTFS.
 * Drawback: the directory must be read once completely before the first
 * record can be returned.  Advantage: less complex implementation.
 *
 * The function is called recursively and by load_tree_nodes().
 * It is not called from another context.
 */
static int find_tree_nodes(pinode inode,const unsigned char *entry,int maxlen)
{
    unsigned char *indexblock;
    const unsigned char *ptr;
    int l,i,fnp,blockat,nmaxlen;

    indexblock=NULL;
    while(1) {
        /* Truncated entry */
        if(maxlen<0x10) {
            free(indexblock);
            return 5;
        }
        l=getlittle16(entry,8);
#if 0
        /* The documentation from sourceforge
         * seems to be wrong */
        fnp=10+getlittle16(entry,10);
#else
        /* The file name is always at 0x52 !! */
        fnp=0x52;
#endif
        /* Entry has a sub-node: descend (bounded to 0xFFF0 stored blocks) */
        if((entry[12]&1) && inode->ntree<0xFFF0) {
            if(!indexblock) {
                indexblock=malloc(inode->clusttree);
                if(!indexblock)
                    return 5;
            }
            /* The sub-node VCN is stored at the end of the entry */
            if(entry[12]&2)
                ptr=entry+0x10;
            else
                ptr=entry+((2*entry[fnp-2]+fnp+7)&~7);
            if(memcmp(ptr+2,"\0\0\0\0",4)) {
                free(indexblock);
                return 5;
            }
            blockat=getlittle32(ptr,0);
            inode->treedata[inode->ntree++]=(unsigned short)blockat;
            i=read_attribute(inode,0xA0,(long long)blockat*clustlen,
                             indexblock,inode->clusttree,0);
            if(i!=inode->clusttree)
                i=-5;
            /* NOTE(review): this "INDX" magic check only runs when i==0,
             * which can only happen if clusttree==0 — it looks like it was
             * meant to run after a successful read.  Left untouched to
             * preserve behavior. */
            if(!i)
                if(memcmp(indexblock,"INDX",4))
                    i=-5;
            if(i<0) {
                free(indexblock);
                return -i;
            }
            /* Fix: the original returned here without freeing indexblock,
             * leaking one clusttree-sized buffer per failed fixup. */
            if(do_fixups(indexblock,inode->clusttree)) {
                free(indexblock);
                return 5;
            }
            ptr=indexblock+getlittle32(indexblock,0x18)+0x18;
            nmaxlen=getlittle32(indexblock,0x1C);
            i=find_tree_nodes(inode,ptr,nmaxlen);
            if(i) {
                free(indexblock);
                return i;
            }
        }
        /* Last entry of this node */
        if(entry[12]&2) {
            free(indexblock);
            return 0;
        }
        /* Advance to the next entry */
        entry+=l;
        maxlen-=l;
    }
}
/* Read an attribute or a part of it.
 * Note: In NTFS the entire file contents is an attribute.
 * typ is the attribute type; the attribute's name is ignored.
 * The name is used to distinguish between different attributes
 * of the same type; example: Converting a HFS file system to
 * NTFS => All files have a "" and a "resource" attribute of
 * the type 0x80 representing the data and the resource fork.
 * lvl is used for recursive calls and must be 0 otherwise.
 *
 * Returns the number of bytes copied into dest (possibly fewer than len),
 * or a negative error code.
 */
static int read_attribute(pinode inode,int typ,long long offset,void *dest,
                          unsigned len,int lvl)
{
    long long cluster,ll;
    unsigned off,pos,attpos,attllen,couldadddata;
    const unsigned char *attrib,*cldata;
    unsigned char *attlist;
    pinode combinode;
    int i,onlylisted;

    /* Too many recursions */
    if(lvl>10)
        return -5;

    /* Definition: offset<0 returns 0.
     * This is useful for the recursive calls with $ATTRIBUTE_LISTs. */
    if(offset<0)
        return 0;

    /* Find the attribute in this MFT entry */
    attrib=find_attribute(inode->mftentry,typ);

    /* It is not in the MFT - maybe it is only stored
     * indirectly in an $ATTRIBUTE_LIST (see splitting
     * inodes below) */
    if(!attrib) {
        pos=0;
        onlylisted=1;
    }
    /* The attribute is stored directly (resident in the MFT entry) */
    else if(!attrib[8]) {
        onlylisted=0;
        /* Clamp the copy to [0, min(len, attribute size - offset)] */
        ll=getlittle32(attrib,0x10)-offset;
        if(ll<0)
            ll=0;
        else if(len<ll)
            ll=len;
        pos=(unsigned)ll;
        memcpy(dest,attrib+getlittle16(attrib,0x14)+(int)offset,pos);
    }
    /* The attribute is stored anywhere in the file system
     * (non-resident): walk its run list cluster by cluster */
    else {
        onlylisted=0;
        /* Get the first cluster */
        cluster=offset/clustlen;
        off=(unsigned)(offset%clustlen);
        attrib+=getlittle16(attrib,0x20);
        ll=rel_to_cluster(attrib,cluster);
        if(ll<-1ll)
            pos=0;
        else {
            cldata=get_cluster(ll);
            if(!cldata)
                return -5;
            /* Copy all bytes, advancing to the next cluster as needed */
            for(pos=0;pos<len;pos++) {
                if(off>=clustlen) {
                    ll=rel_to_cluster(attrib,++cluster);
                    if(ll<-1ll)
                        break;
                    else {
                        cldata=get_cluster(ll);
                        if(!cldata)
                            return -5;
                        off=0;
                    }
                }
                ((unsigned char *)dest)[pos]=cldata[off++];
            }
        }
    }

    /* NTFS is able to split file entries (inodes)
     * into multiple inodes; an $ATTRIBUTE_LIST is
     * used to do this!
     * Concatenate them here!
     * Return if this is not needed.
     * typ==0x20: Nested $ATTRIBUTE_LISTs are not supported. */
    if(pos>=len || typ==0x20)
        return len;

    /* OK. The data needed is found in another inode.
     * Load the attribute list. */
    attrib=find_attribute(inode->mftentry,0x20);
    if(!attrib)
        return pos;
    attllen=getlittle32(attrib,attrib[8]?0x30:0x10);
    attlist=malloc(attllen);
    if(!attlist)
        return pos;
    if(read_attribute(inode,0x20,0,attlist,attllen,lvl)!=attllen) {
        free(attlist);
        return pos;
    }

    /* Add data from the other inodes; keep looping while progress is
     * being made, since list entries need not be in file order */
    do {
        couldadddata=0;
        for(attpos=0;attpos+0x10<=attllen && len>pos;attpos+=i) {
            if(getlittle32(attlist,attpos)==typ) {
                /* Starting VCN of this attribute piece */
                ll=getlittle64(attlist,attpos+8);
                if(ll || onlylisted)
                    if(offset+pos>=ll*clustlen) {
                        /* Open the inode that holds this piece */
                        i=get_inode(getlittle48(attlist,attpos+16),&combinode,1);
                        if(i) {
                            free(attlist);
                            return -i;
                        }
                        i=read_attribute(combinode,typ,
                                         offset+pos-getlittle64(attlist,attpos+8)*clustlen,
                                         ((char *)dest)+pos,len-pos,lvl+1);
                        if(i<0) {
                            free(attlist);
                            return -i;
                        }
                        else if(i>0) {
                            pos+=i;
                            couldadddata=1;
                        }
                    }
            }
            /* Length of this list record; 0 would loop forever */
            i=getlittle16(attlist,attpos+4);
            if(!i)
                break;
        }
    } while(couldadddata && len>pos);
    free(attlist);
    return pos;
}
/* Parse a java agent specification of the form "<jarpath>[=<options>]":
 * read the jar's manifest, register the Premain-Class and options on the
 * global agent list, fold Can-Redefine-Classes into gsupport_redefine and
 * append any Boot-Class-Path entries to the bootstrap class loader search.
 * Returns 0 on success; fatal manifest problems abort via FatalError. */
jint Parse_Options(JavaVM *vm, JNIEnv *env, jvmtiEnv *jvmti, const char *agent){
    PORT_ACCESS_FROM_JAVAVM(vm);
    VMI_ACCESS_FROM_JAVAVM(vm);
    AgentList *new_elem = (AgentList *)hymem_allocate_memory(sizeof(AgentList));
    char *agent_cpy = (char *)hymem_allocate_memory(sizeof(char)*(strlen(agent)+1));
    char *jar_name, *manifest;
    char *options = NULL;
    char *class_name, *bootclasspath, *str_support_redefine;
    char *bootclasspath_item;
    char *classpath;
    char *classpath_cpy;
    int support_redefine = 0;
    char *pos;
    char *lwrmanifest;

    strcpy(agent_cpy, agent);

    //parse jar name and options (separated by the first '=')
    pos = strchr(agent_cpy, '=');
    if(NULL != pos){    /* fix: was `pos > 0`, an ordered comparison on a pointer */
        *pos++ = 0;
        options = (char *)hymem_allocate_memory(sizeof(char) * (strlen(pos)+1));
        strcpy(options, pos);
    }
    jar_name = agent_cpy;

    //read jar file, find manifest entry and read bytes
    //read attributes (premain class, support redefine, bootclasspath)
    manifest = Read_Manifest(vm, env, jar_name);
    if(NULL == manifest){
        /* fix: the original passed a possibly-NULL manifest to strlen() below */
        (*env)->FatalError(env, "Cannot read manifest from agent jar.");
    }
    lwrmanifest = (char *)hymem_allocate_memory(sizeof(char) * (strlen(manifest)+1));
    strcpy(lwrmanifest,manifest);
    strlower(lwrmanifest);

    //jar itself added to the application classpath
    check_jvmti_error(env, (*jvmti)->GetSystemProperty(jvmti,"java.class.path",&classpath),"Failed to get classpath.");
    classpath_cpy = (char *)hymem_allocate_memory((sizeof(char)*(strlen(classpath)+strlen(jar_name)+2)));
    strcpy(classpath_cpy,classpath);
#if defined(WIN32) || defined(WIN64)
    strcat(classpath_cpy,";");
#else
    strcat(classpath_cpy,":");
#endif
    strcat(classpath_cpy,jar_name);
    check_jvmti_error(env, (*jvmti)->SetSystemProperty(jvmti, "java.class.path",classpath_cpy),"Failed to set classpath.");
    hymem_free_memory(classpath_cpy);
    /* fix: jar_name (aliasing agent_cpy) was freed here in the original and
     * then read by the Boot-Class-Path handling below (use-after-free);
     * it is now freed at the end of the function. */

    //save options, save class name, add to agent list
    class_name = read_attribute(vm, manifest, lwrmanifest,"premain-class");
    if(NULL == class_name){
        hymem_free_memory(lwrmanifest);
        hymem_free_memory(manifest);
        (*env)->FatalError(env,"Cannot find Premain-Class attribute.");
    }
    new_elem->option = options;
    new_elem->class_name = class_name;
    new_elem->next = NULL;
    tail->next = new_elem;
    tail = new_elem;

    //calculate support redefine (ANDed across all agents)
    str_support_redefine = read_attribute(vm, manifest, lwrmanifest,"can-redefine-classes");
    if(NULL != str_support_redefine){
        support_redefine = str2bol(str_support_redefine);
        hymem_free_memory(str_support_redefine);
    }
    gsupport_redefine &= support_redefine;

    //add bootclasspath entries (space separated)
    bootclasspath = read_attribute(vm, manifest, lwrmanifest,"boot-class-path");
    if (NULL != bootclasspath){
#if defined(WIN32) || defined(WIN64)
        // On Windows the agent jar path can have a mixture of forward and back slashes.
        // For ease, convert forward slashes to back slashes
        char *currentSlash = strchr(jar_name, '/');
        while (currentSlash) {
            *currentSlash = '\\';
            currentSlash = strchr(currentSlash, '/');
        }
#endif
        bootclasspath_item = strtok(bootclasspath, " ");
        while(NULL != bootclasspath_item){
            if ((bootclasspath_item[0] != DIR_SEPARATOR) && (strrchr(jar_name, DIR_SEPARATOR))) {
                // This is not an absolute path, so add this relative path to the path of the agent library
                int lastSeparatorOff = strrchr(jar_name, DIR_SEPARATOR) - jar_name + 1;
                int size = lastSeparatorOff + strlen(bootclasspath_item) + 1;
                char *jarPath = (char *)hymem_allocate_memory(size);
                memcpy(jarPath, jar_name, lastSeparatorOff);
                strcpy(jarPath + lastSeparatorOff, bootclasspath_item);
                check_jvmti_error(env, (*jvmti)->AddToBootstrapClassLoaderSearch(jvmti, jarPath),"Failed to add bootstrap classpath.");
                hymem_free_memory(jarPath);
            } else {
                // This is either an absolute path or jar_name has no path before the filename
                check_jvmti_error(env, (*jvmti)->AddToBootstrapClassLoaderSearch(jvmti, bootclasspath_item),"Failed to add bootstrap classpath.");
            }
            bootclasspath_item = strtok(NULL, " ");
        }
        hymem_free_memory(bootclasspath);
    }

    hymem_free_memory(jar_name);    /* frees agent_cpy */
    hymem_free_memory(lwrmanifest);
    hymem_free_memory(manifest);
    return 0;
}
/* * This function is to verify the data from multiple group testing. It opens * every dataset in every group and check their correctness. * * Changes: Updated function to use a dynamically calculated size, * instead of the old SIZE #define. This should allow it * to function with an arbitrary number of processors. * * JRM - 8/11/04 */ void multiple_group_read(void) { int mpi_rank, mpi_size, error_num, size; int m; hbool_t use_gpfs = FALSE; char gname[64]; hid_t plist, fid, gid, memspace, filespace; hsize_t chunk_origin[DIM]; hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM]; const H5Ptest_param_t *pt; char *filename; int ngroups; pt = GetTestParameters(); filename = pt->name; ngroups = pt->count; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); size = get_size(); plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs); fid = H5Fopen(filename, H5F_ACC_RDONLY, plist); H5Pclose(plist); /* decide hyperslab for each process */ get_slab(chunk_origin, chunk_dims, count, file_dims, size); /* select hyperslab for memory and file space */ memspace = H5Screate_simple(DIM, file_dims, NULL); H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); filespace = H5Screate_simple(DIM, file_dims, NULL); H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); /* open every group under root group. */ for(m=0; m<ngroups; m++) { sprintf(gname, "group%d", m); gid = H5Gopen(fid, gname); VRFY((gid > 0), gname); /* check the data. */ if(m != 0) if( (error_num = read_dataset(memspace, filespace, gid))>0) nerrors += error_num; /* check attribute.*/ error_num = 0; if( (error_num = read_attribute(gid, is_group, m))>0 ) nerrors += error_num; H5Gclose(gid); #ifdef BARRIER_CHECKS if(!((m+1)%10)) MPI_Barrier(MPI_COMM_WORLD); #endif /* BARRIER_CHECKS */ } /* open all the groups in vertical direction. 
*/ gid = H5Gopen(fid, "group0"); VRFY((gid>0), "group0"); recursive_read_group(memspace, filespace, gid, 0); H5Gclose(gid); H5Sclose(filespace); H5Sclose(memspace); H5Fclose(fid); }