void CompactIndex::initializeForQuerying() {
    readOnly = true;
    fileHandle = open(fileName, O_RDONLY | O_LARGEFILE);
    if (fileHandle < 0) {
        snprintf(errorMessage, sizeof(errorMessage),
                "Unable to open on-disk index: %s", fileName);
        log(LOG_ERROR, LOG_ID, errorMessage);
        perror(NULL);
        exit(1);
    }

    // create File object to be used by all posting lists; initial usage count: 1
    // setting the usage count to 1 makes sure the object is not destroyed by
    // its children (see FileFile for details)
    baseFile = new FileFile(fileName, (off_t)0, 1);

    // read the fixed-size header from the end of the file, then the block
    // descriptors that immediately precede it
    readRawData(getByteSize() - sizeof(header), &header, sizeof(header));
    long long descSize = header.descriptorCount * sizeof(CompactIndex_BlockDescriptor);
    descriptorSlotCount = header.descriptorCount;
    descriptors = typed_malloc(CompactIndex_BlockDescriptor, descriptorSlotCount + 1);
    readRawData(getByteSize() - sizeof(header) - descSize, descriptors, descSize);
    long long pc = header.postingCount;

    sprintf(errorMessage, "On-disk index loaded: %s", fileName);
    log(LOG_DEBUG, LOG_ID, errorMessage);
    sprintf(errorMessage,
            " terms: %d, segments: %d, postings: %lld, descriptors: %d (%lld bytes)",
            header.termCount, header.listCount, pc, header.descriptorCount, descSize);
    log(LOG_DEBUG, LOG_ID, errorMessage);
} // end of initializeForQuerying()
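// A minimal, self-contained sketch of the trailer-style layout used above: the
// fixed-size header lives at the very end of the file, so a reader seeks to
// (file size - sizeof(header)) and reads backwards from there.  The struct and
// helper below are illustrative assumptions, not the actual CompactIndex
// on-disk format or its readRawData() routine.

#include <cstdio>

struct TrailerHeader {            // hypothetical stand-in for CompactIndex's header
    long long termCount;
    long long postingCount;
    long long descriptorCount;
};

static bool readTrailerHeader(const char *path, TrailerHeader *out) {
    FILE *f = fopen(path, "rb");
    if (f == NULL)
        return false;
    // seek to sizeof(TrailerHeader) bytes before the end of the file
    if (fseek(f, -(long)sizeof(TrailerHeader), SEEK_END) != 0) {
        fclose(f);
        return false;
    }
    bool ok = (fread(out, sizeof(TrailerHeader), 1, f) == 1);
    fclose(f);
    return ok;
}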
void StackInfo::write(U_8* bytes) {
    U_8* data = bytes;

    // copy the fixed-size part of the StackInfo first
    StackInfo* serializedInfo = (StackInfo*)data;
    *serializedInfo = *this;
    serializedInfo->byteSize = getByteSize();
    data += sizeof(StackInfo);

    // rebuild the depth-info hash table in temporary memory
    MemoryManager mm("DepthInfo");
    EntryPtr* entries = new(mm) EntryPtr[hashTableSize];
    for (U_32 i = 0; i < hashTableSize; i++)
        entries[i] = NULL;
    for (DepthMap::iterator dmit = stackDepthInfo->begin(); dmit != stackDepthInfo->end(); dmit++) {
        hashSet(entries, dmit->first, hashTableSize, dmit->second, mm);
    }

    // write the bucket table, followed by the entries of each bucket; the
    // chained entries of a bucket are laid out contiguously, so a serialized
    // entry's 'next' pointer simply refers to the next serialized slot
    U_8* next = data + hashTableSize * sizeof(POINTER_SIZE_INT);
    for (U_32 i = 0; i < hashTableSize; i++) {
        DepthEntry* e = entries[i];
        POINTER_SIZE_INT serializedEntryAddr = 0;
        if (entries[i]) {
            serializedEntryAddr = (POINTER_SIZE_INT)next;
            for (; e != NULL; e = e->next) {
                DepthEntry* serialized = (DepthEntry*)next;
                *serialized = *e;
                next += sizeof(DepthEntry);
                serialized->next = e->next ? (DepthEntry*)next : NULL;
            }
        }
        *((POINTER_SIZE_INT*)data) = serializedEntryAddr;
        data += sizeof(POINTER_SIZE_INT);
    }
    assert(getByteSize() == (POINTER_SIZE_INT)(((U_8*)next) - bytes));
}
//_________________________________________________________________________________________________
void CallingConventionClient::layoutAuxilaryOpnds(Inst::OpndRole role, OpndKind kindForStackArgs)
{
    StlVector<CallingConvention::OpndInfo>& infos = getInfos(role);
    StlVector<StackOpndInfo>& stackOpndInfos = getStackOpndInfos(role);
    U_32 slotSize = sizeof(POINTER_SIZE_INT);
    U_32 regArgCount = 0, stackArgCount = 0;
    Inst::Opnds opnds(ownerInst, Inst::OpndRole_Auxilary | role);
    Inst::Opnds::iterator handledOpnds = opnds.begin();
    for (U_32 i = 0, n = (U_32)infos.size(); i < n; i++) {
        const CallingConvention::OpndInfo& info = infos[i];
#ifdef _DEBUG
        bool eachSlotRequiresOpnd = false;
#endif
        U_32 offset = 0;
        for (U_32 j = 0, cbCurrent = 0; j < info.slotCount; j++) {
            Opnd* opnd = opnds.getOpnd(handledOpnds);
            OpndSize sz = opnd->getSize();
            U_32 cb = getByteSize(sz);
            RegName r = (RegName)info.slots[j];
            if (info.isReg) {
                r = Constraint::getAliasRegName(r, sz);
                assert(r != RegName_Null);
#ifdef _DEBUG
                eachSlotRequiresOpnd = true;
#endif
                cbCurrent += getByteSize(getRegSize(r));
            } else {
                if (cbCurrent == 0)
                    offset = (info.slots[j] & 0xffff) * slotSize;
                cbCurrent += slotSize;
            }
            if (cbCurrent >= cb) {
                if (info.isReg) {
                    ownerInst->setConstraint(handledOpnds, r);
                    regArgCount++;
                } else {
                    ownerInst->setConstraint(handledOpnds, Constraint(kindForStackArgs, sz));
                    stackArgCount++;
                    StackOpndInfo sainfo = { handledOpnds, offset };
                    stackOpndInfos.push_back(sainfo);
                }
                handledOpnds = opnds.next(handledOpnds);
#ifdef _DEBUG
                eachSlotRequiresOpnd = false;
#endif
                cbCurrent = 0;
            }
#ifdef _DEBUG
            assert(!eachSlotRequiresOpnd);
#endif
        }
    }
    if (stackArgCount > 0)
        sort(stackOpndInfos.begin(), stackOpndInfos.end());
    assert(handledOpnds == opnds.end());
    assert(stackArgCount == stackOpndInfos.size());
}
virtual std::size_t writeSerialized(uint8_t* buf, std::size_t len) {
    if (cached_size > 0) {
        if (len < cached_size)
            return 0;
    } else {
        if (len < getByteSize())
            return 0;
    }
    uint8_t* c = buf;
    //c=writeChunk<MessageEntry::msgclass_t>(c,len-(c-buf),m->msgclass);
    c = classcoder.write(c, len - (c - buf));
    if (Traits::hasTopic == true)
        c = topiccoder.write(c, len - (c - buf));
    //writeChunk<typename Traits::topic_t>(c,len-(c-buf),(typename Traits::topic_t)m->topic);
    if (Traits::hasTimeStamp == true)
        c = timestampcoder.write(c, len - (c - buf));
    c = typenamecoder.write(c, len - (c - buf));
    if (m->msg) {
        c = writeBytes(c, (const uint8_t*)m->msg->GetTypeName().data(), typenamecoder.uncoded);
        std::size_t msgbytes = m->msg->ByteSize();
        //std::cout<<"Message bytesize"<<msgbytes<<std::endl;
        //std::cout<<"Message bytesize"<<msgbytes<<" "<<(c-buf)<<" "<<len<<std::endl;
        if (m->msg->SerializePartialToArray(c, len - (c - buf))) {
            c += msgbytes;
            //std::cout<<"Serialization succeeded"<<std::endl;
        }
    }
    return c - buf;
}
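// The method above interleaves hand-rolled field coders with protobuf's own
// serialization.  The protobuf part of that pattern, isolated: ask the message
// for its serialized size, check that the remaining buffer can hold it, then
// serialize directly into the buffer and advance the write cursor.  This is a
// minimal sketch against the public protobuf API (ByteSizeLong, the size_t
// variant of the ByteSize() call used above, and SerializeToArray); the
// surrounding coder objects are not reproduced here.

#include <cstddef>
#include <cstdint>
#include <google/protobuf/message.h>

// Returns the number of bytes written, or 0 if 'len' is too small.
static std::size_t appendMessage(const google::protobuf::Message &msg,
                                 uint8_t *buf, std::size_t len) {
    const std::size_t msgBytes = msg.ByteSizeLong();
    if (msgBytes > len)
        return 0;
    if (!msg.SerializeToArray(buf, (int)msgBytes))
        return 0;
    return msgBytes;
}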
Interface::Interface(FILE* stream) {
    fread(&size, sizeof(unsigned), 1, stream);
    fread(&bufferType, sizeof(BufferType), 1, stream);
    unsigned byteSize = getByteSize();
    data = MemoryManagement::malloc(byteSize);
    fread(data, byteSize, 1, stream);
}
Interface::Interface(Interface* toCopy) {
    this->size = toCopy->getSize();
    this->bufferType = toCopy->getBufferType();
    size_t byteSize = getByteSize();
    data = MemoryManagement::malloc(byteSize);
    memcpy(data, toCopy->getDataPointer(), byteSize);
}
Interface::Interface(unsigned size, BufferType bufferType) {
    this->size = size;
    this->bufferType = bufferType;
    size_t byteSize = getByteSize();
    data = MemoryManagement::malloc(byteSize);
    reset();
}
void Interface::copyFromFast(Interface* other) {
    if (size != other->getSize()) {
        std::string error = "The sizes of the interfaces are different.";
        throw error;
    }
    if (bufferType != other->getBufferType()) {
        std::string error = "The buffer types of the interfaces are different.";
        throw error;
    }
    memcpy(data, other->getDataPointer(), getByteSize());
}
void Interface::load(FILE* stream) {
    unsigned size2;
    BufferType bufferType2;
    fread(&size2, sizeof(unsigned), 1, stream);
    fread(&bufferType2, sizeof(BufferType), 1, stream);
    if (size2 != size) {
        std::string error = "The size of the interface is different from the size to load.";
        throw error;
    }
    if (bufferType2 != bufferType) {
        std::string error = "The buffer type of the interface is different from the buffer type to load.";
        throw error;
    }
    fread(data, getByteSize(), 1, stream);
}
void Interface::reset() {
    switch (bufferType) {
        case BT_FLOAT:
        case BT_FLOAT_SMALL:
            for (unsigned i = 0; i < size; i++) {
                ((float*)(data))[i] = 0;
            }
            break;
        case BT_BYTE:
        case BT_BIT:
        case BT_SIGN:
            for (unsigned i = 0; i < getByteSize(); i++) {
                ((unsigned char*)(data))[i] = 0;
            }
            break;
    }
}
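// reset() zeroes 'size' floats for the float types but getByteSize() raw bytes
// for the byte/bit/sign types, which only works if getByteSize() derives the
// allocation size from both 'size' and 'bufferType'.  Interface::getByteSize()
// itself is not shown in this listing; the free function below sketches one
// plausible mapping (four bytes per float element, one byte per byte element,
// bits and signs packed eight per byte) and is an assumption, not the
// project's actual code.

static unsigned interfaceByteSizeSketch(unsigned size, BufferType bufferType) {
    switch (bufferType) {
        case BT_FLOAT:
        case BT_FLOAT_SMALL:
            return size * sizeof(float);   // one float per element
        case BT_BYTE:
            return size;                   // one byte per element
        case BT_BIT:
        case BT_SIGN:
            return (size + 7) / 8;         // packed bits, rounded up to whole bytes
        default:
            return 0;
    }
}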
void CompactIndex::loadIndexIntoMemory() {
    // load the entire index into RAM
    totalSize = getByteSize();
    inMemoryIndex = (char*)malloc(totalSize);
    lseek(fileHandle, (off_t)0, SEEK_SET);
    int64_t done = 0;
    int64_t toDo = totalSize;
    static const int BUFFER_SIZE = 256 * 1024;
    while (toDo > 0) {
        if (toDo < BUFFER_SIZE)
            toDo -= forced_read(fileHandle, &inMemoryIndex[done], toDo);
        else {
            int result = forced_read(fileHandle, &inMemoryIndex[done], BUFFER_SIZE);
            assert(result == BUFFER_SIZE);
            done += result;
            toDo -= result;
        }
    }
} // end of loadIndexIntoMemory()
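// loadIndexIntoMemory() relies on forced_read() returning exactly the number
// of bytes requested: the assert on the large-block branch, and the fact that
// 'done' is never advanced on the final short block, both assume it.  The
// helper below is a minimal sketch of such a "read fully" wrapper around the
// POSIX read() call; it illustrates the assumed contract and is not the
// actual forced_read implementation.

#include <cerrno>
#include <unistd.h>

// Reads exactly 'count' bytes unless EOF or an error occurs; returns the
// number of bytes actually read.
static long read_fully(int fd, void *buffer, long count) {
    char *out = (char *)buffer;
    long total = 0;
    while (total < count) {
        ssize_t got = read(fd, out + total, count - total);
        if (got < 0) {
            if (errno == EINTR)
                continue;      // interrupted by a signal: retry
            break;             // real error
        }
        if (got == 0)
            break;             // end of file
        total += got;
    }
    return total;
}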
void TerrainDemo::clientMoveAndDisplay(void) {
    // elapsed time
    float us = getDeltaTimeMicroseconds();
    float seconds = 1.0e-6 * us;

    // we'll carefully iterate through each time step so we can update
    // the dynamic model if necessary
    long nStepsPerIteration = 1;
    while (seconds > 1.0e-6) {
        float dt = nStepsPerIteration * s_engineTimeStep;
        if (dt > seconds) {
            dt = seconds;
        }
        seconds -= dt;
        // std::cerr << " Stepping through " << dt << " seconds\n";

        // if dynamic and radial, go ahead and update the field
        if (m_rawHeightfieldData && m_isDynamic && eRadial == m_model) {
            m_phase += s_deltaPhase * dt;
            if (m_phase > 2.0 * SIMD_PI) {
                m_phase -= 2.0 * SIMD_PI;
            }
            int bpe = getByteSize(m_type);
            btAssert(bpe > 0 && "Bad bytes per element");
            setRadial(m_rawHeightfieldData, bpe, m_type, m_phase);
        }

        if (m_dynamicsWorld) {
            m_dynamicsWorld->stepSimulation(dt, nStepsPerIteration + 1, s_engineTimeStep);
        }
    }

    // okay, render
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    renderme();
    glFlush();
    glutSwapBuffers();
}
static float getGridHeight(byte_t * grid, int i, int j, PHY_ScalarType type)
{
    btAssert(grid);
    btAssert(i >= 0 && i < s_gridSize);
    btAssert(j >= 0 && j < s_gridSize);
    int bpe = getByteSize(type);
    btAssert(bpe > 0 && "bad bytes per element");
    int idx = (j * s_gridSize) + i;
    long offset = ((long)bpe) * idx;
    byte_t * p = grid + offset;
    return convertToFloat(p, type);
}
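// getGridHeight() leaves the per-type work to two helpers that are not part of
// this listing: getByteSize(type), which maps a PHY_ScalarType to its element
// width, and convertToFloat(p, type), which decodes one element.  The sketch
// below shows one way those helpers can look for the three types this demo
// uses (float, unsigned char, short); the integer-to-height scale factor is an
// assumed constant, not necessarily the demo's value.

static const float kAssumedHeightScale = 1.0f;   // assumption: scale for integer samples

static int getByteSizeSketch(PHY_ScalarType type)
{
    switch (type) {
        case PHY_FLOAT: return sizeof(float);
        case PHY_UCHAR: return sizeof(unsigned char);
        case PHY_SHORT: return sizeof(short);
        default:        btAssert(!"Bad heightfield data type"); return 0;
    }
}

static float convertToFloatSketch(const byte_t * p, PHY_ScalarType type)
{
    switch (type) {
        case PHY_FLOAT: return *(const float*)p;
        case PHY_UCHAR: return kAssumedHeightScale * (*p);
        case PHY_SHORT: return kAssumedHeightScale * (*(const short*)p);
        default:        btAssert(!"Bad heightfield data type"); return 0.0f;
    }
}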
void Interface::save(FILE* stream) {
    fwrite(&size, sizeof(unsigned), 1, stream);
    fwrite(&bufferType, sizeof(BufferType), 1, stream);
    fwrite(data, getByteSize(), 1, stream);
}
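// Usage sketch for the save()/load() pair: save() writes the size and buffer
// type followed by the raw data block, and load() expects an Interface that
// was already constructed with the matching size and type (otherwise it throws
// a std::string).  The file name below is arbitrary and error handling of the
// C stdio calls is omitted; this is an illustration, not code from the project.

#include <cstdio>

void interfaceRoundTripExample() {
    Interface original(128, BT_FLOAT);          // 128 float elements, zeroed by reset()

    FILE *out = fopen("interface.bin", "wb");   // hypothetical scratch file
    original.save(out);
    fclose(out);

    Interface restored(128, BT_FLOAT);          // must match the saved size and type
    FILE *in = fopen("interface.bin", "rb");
    restored.load(in);                          // throws a std::string on any mismatch
    fclose(in);
}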
static byte_t * getRawHeightfieldData(eTerrainModel model, PHY_ScalarType type,
                                      btScalar& minHeight, btScalar& maxHeight)
{
    // std::cerr << "\nRegenerating terrain\n";
    // std::cerr << " model = " << model << "\n";
    // std::cerr << " type = " << type << "\n";
    long nElements = ((long)s_gridSize) * s_gridSize;
    // std::cerr << " nElements = " << nElements << "\n";

    int bytesPerElement = getByteSize(type);
    // std::cerr << " bytesPerElement = " << bytesPerElement << "\n";
    btAssert(bytesPerElement > 0 && "bad bytes per element");

    long nBytes = nElements * bytesPerElement;
    // std::cerr << " nBytes = " << nBytes << "\n";
    byte_t * raw = new byte_t[nBytes];
    btAssert(raw && "out of memory");

    // reseed randomization every 30 seconds
    // srand(time(NULL) / 30);

    // populate based on model
    switch (model) {
    case eRadial:
        setRadial(raw, bytesPerElement, type);
        break;

    case eFractal:
        for (long i = 0; i < nBytes; i++) {
            raw[i] = 0;
        }
        setFractal(raw, bytesPerElement, type, s_gridSize - 1);
        break;

    default:
        btAssert(!"bad model type");
    }

    if (0) {
        // inside if(0) so it keeps compiling but isn't
        // exercised and doesn't cause warnings
        // std::cerr << "final grid:\n";
        dumpGrid(raw, bytesPerElement, type, s_gridSize - 1);
    }

    // find min/max
    for (int i = 0; i < s_gridSize; ++i) {
        for (int j = 0; j < s_gridSize; ++j) {
            float z = getGridHeight(raw, i, j, type);
            // std::cerr << "i=" << i << ", j=" << j << ": z=" << z << "\n";

            // update min/max
            if (!i && !j) {
                minHeight = z;
                maxHeight = z;
            } else {
                if (z < minHeight) {
                    minHeight = z;
                }
                if (z > maxHeight) {
                    maxHeight = z;
                }
            }
        }
    }

    if (maxHeight < -minHeight) {
        maxHeight = -minHeight;
    }
    if (minHeight > -maxHeight) {
        minHeight = -maxHeight;
    }

    // std::cerr << " minHeight = " << minHeight << "\n";
    // std::cerr << " maxHeight = " << maxHeight << "\n";

    return raw;
}
void CompactIndex2::initializeForQuerying() {
    readOnly = true;
    fileHandle = open(fileName, O_RDONLY | O_LARGEFILE);
    if (fileHandle < 0) {
        snprintf(errorMessage, sizeof(errorMessage),
                "Unable to open on-disk index: %s", fileName);
        log(LOG_ERROR, LOG_ID, errorMessage);
        perror(NULL);
        exit(1);
    } else {
        // create File object to be used by all posting lists; initial usage count: 1
        // setting the usage count to 1 makes sure the object is not destroyed by
        // its children (see FileFile for details)
        baseFile = new FileFile(fileName, (off_t)0, 1);
    }

    // read header from end of file
    readRawData(getByteSize() - sizeof(header), &header, sizeof(header));

    // read compressed descriptor sequence
    usedByDescriptors = allocatedForDescriptors = header.compressedDescriptorSize;
    endOfPostingsData = getByteSize() - sizeof(header) - usedByDescriptors;
    compressedDescriptors = (byte*)malloc(usedByDescriptors);
    readRawData(endOfPostingsData, compressedDescriptors, usedByDescriptors);

    sprintf(errorMessage, "On-disk index loaded: %s", fileName);
    log(LOG_DEBUG, LOG_ID, errorMessage);
    sprintf(errorMessage,
            " terms: %lld, segments: %lld, postings: %lld, descriptors: %lld (%d bytes)",
            (long long)header.termCount, (long long)header.listCount,
            (long long)header.postingCount, (long long)header.descriptorCount,
            usedByDescriptors);
    log(LOG_DEBUG, LOG_ID, errorMessage);

    // build search array from compressed descriptor sequence
    dictionaryGroupCount =
        (header.descriptorCount + DICTIONARY_GROUP_SIZE - 1) / DICTIONARY_GROUP_SIZE;
    groupDescriptors = typed_malloc(CompactIndex2_DictionaryGroup, dictionaryGroupCount + 1);
    int64_t filePos = 0;
    char prevTerm[MAX_TOKEN_LENGTH * 2] = { 0 };
    uint32_t inPos = 0;
    for (int i = 0; i < dictionaryGroupCount; i++) {
        assert(inPos < 2000000000);
        offset delta;
        groupDescriptors[i].groupStart = inPos;
        inPos += decodeFrontCoding(&compressedDescriptors[inPos], prevTerm,
                                   groupDescriptors[i].groupLeader);
        inPos += decodeVByteOffset(&delta, &compressedDescriptors[inPos]);
        filePos += delta;
        groupDescriptors[i].filePosition = filePos;
        strcpy(prevTerm, groupDescriptors[i].groupLeader);
        for (int k = 1; (k < DICTIONARY_GROUP_SIZE) && (inPos < usedByDescriptors); k++) {
            char term[MAX_TOKEN_LENGTH * 2];
            inPos += decodeFrontCoding(&compressedDescriptors[inPos], prevTerm, term);
            inPos += decodeVByteOffset(&delta, &compressedDescriptors[inPos]);
            strcpy(prevTerm, term);
            filePos += delta;
        }
    } // end for (int i = 0; i < dictionaryGroupCount; i++)

    temporaryPLSH = NULL;
} // end of initializeForQuerying()
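// The dictionary loop above rebuilds every group leader by repeatedly calling
// decodeFrontCoding(), i.e. each term is stored as the length of the prefix it
// shares with the previous term plus the remaining suffix.  The sketch below
// decodes one entry of a simple front-coded stream (one byte of prefix length,
// one byte of suffix length, then the suffix characters) and returns the number
// of input bytes consumed.  This byte layout is an illustrative assumption; the
// actual encoding used by decodeFrontCoding() is not shown in this listing.

#include <cstring>

static int decodeFrontCodedTerm(const unsigned char *in, const char *prevTerm, char *out) {
    int prefixLen = in[0];                       // characters shared with prevTerm
    int suffixLen = in[1];                       // new characters following the prefix
    memcpy(out, prevTerm, prefixLen);            // copy the shared prefix
    memcpy(out + prefixLen, &in[2], suffixLen);  // append the new suffix
    out[prefixLen + suffixLen] = '\0';
    return 2 + suffixLen;                        // bytes consumed from the input stream
}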