bool sfheader :: readWavHeader(ifstream &f, const char *name) {
	// Parses a RIFF/WAVE header: verifies the RIFF/WAVE magic words, then
	// walks the chunk list until the "data" chunk is found, caching the
	// "fmt " chunk contents on the way. Returns false (after issuing a
	// warning via assertWarning) on any malformed or truncated input.
	bool formatFound = false;
	unsigned char format[16];	// cached contents of the "fmt " chunk
	unsigned char data[12];
	// BUG FIX: istream::read() takes char*; passing unsigned char* directly
	// is ill-formed in conforming C++, so cast explicitly (same fix below).
	f.read(reinterpret_cast<char*>(data), 12);
	if (!assertWarning(f.gcount() == 12,"Error reading header") ||
			!assertWarning(strncmp((char*)data, "RIFF", 4) == 0, "not RIFF file") ||
			!assertWarning(strncmp((char*)data+8, "WAVE", 4) == 0, "not WAV file"))
		return false;
	while (!f.eof()) { // read chunks until data is found
		f.read(reinterpret_cast<char*>(data), 8);	// chunk id + chunk length
		if (!assertWarning(f.gcount() == 8,"Error reading header"))
			return false;
		int chunkLength = sRead32LE(data+4);
		if (strncmp((char*)data, "data", 4) == 0) {
			// "data" must come after "fmt " — we need the format to size frames
			if (!assertWarning(formatFound, "No format data in WAV file"))
				return false;
			init(chunkLength / (sizeof(audioSample) * sRead16LE(format+2)),
					sRead32LE(format+4), sRead16LE(format), sRead16LE(format+2),
					0, 0, name);
			return true;
		} else if (strncmp((char*)data, "fmt ", 4) == 0) {
			f.read(reinterpret_cast<char*>(format), 16);
			if (!assertWarning(f.gcount()==16, "Error in WAV format data") ||
					!assertWarning(sRead16LE(format) == WAV_LINEAR_PCM,
							"not WAV linear PCM format") ||
					!assertWarning(sRead16LE(format+14) == 16, "not 16 bit format"))
				return false;
			f.seekg(chunkLength-16, ios::cur);	// skip any fmt extension bytes
			formatFound = true;
		} else
			f.seekg(chunkLength, ios::cur);		// skip unrelated chunk
	}
	return assertWarning(false, "WAV data not found");
} // readWavHeader()
// Reads a maximum-entropy model from the stream: a "MAXIMUM_ENTROPY <n>"
// header, then for each class a weight count followed by "F<index> <weight>"
// lines terminated by an "END_..." marker line.
// Returns false (after printing the offending line) on a malformed file.
bool MlMaximumEntropyModel::readModel(ifstream& ifs)
{
	char buffer[256];
	// skip comment lines (starting with '#') before the header
	while (ifs.good())
	{
		ifs.getline(buffer,256);
		if (ifs.gcount()>0 && buffer[0]!='#')
			break;
	}
	unsigned int numClasses=0;
	if (sscanf(buffer,"MAXIMUM_ENTROPY %u",&numClasses) != 1)
	{
		cout << "Bad line in model file:" << endl << buffer << endl;
		return false;
	}
	weights_.resize(numClasses);
	for (size_t c=0; c<numClasses; c++)
	{
		ifs.getline(buffer,256);
		unsigned int numWeights=0;
		if (sscanf(buffer,"%u",&numWeights) != 1)
		{
			cout << "Bad line in model file:" << endl << buffer << endl;
			return false;
		}
		weights_[c].resize(numWeights,0.0);
		while (ifs.good())
		{
			ifs.getline(buffer,256);
			// BUG FIX: the original called strncpy here, which always returns
			// its destination (never null) AND overwrote the line with "END_",
			// so the terminator test never worked and data was corrupted.
			if (! strncmp(buffer,"END_",4))
				break;
			if (ifs.gcount() == 0 || buffer[0] != 'F')
				continue;
			size_t index;
			float  weight;
			istringstream iss(buffer+1);	// skip the leading 'F'
			iss >> index >> weight;
			if (iss.fail())
			{
				if (strlen(buffer)<3)	// tolerate short junk lines
					continue;
				cout << "Bad line in model file:" << endl << buffer << endl;
				return false;
			}
			// BUG FIX: was `index > size()`, which let index == size() write
			// one element past the end of the vector.
			if (index>=weights_[c].size())
				error("Bad feature index in line: ",buffer);
			weights_[c][index]=weight;
		}
	}
	return true;
}
// Reads one RVL-compressed inverted-list record (uid, df, diff, size,
// compressed payload) from the stream, decompresses it into `begin`, and
// rebuilds the lastid/end/freq pointers. Returns false on EOF or short read.
bool lemur::index::InvDocList::binReadC(ifstream& inf) {
  if (inf.eof())
    return false;
  int diff;
  inf.read((char*) &uid, sizeof(lemur::api::TERMID_T));
  if (!(inf.gcount() == sizeof(lemur::api::TERMID_T)))
    return false;
  inf.read((char*) &df, LOC_Tsize);
  // BUG FIX: the original wrote `if (!inf.gcount() == LOC_Tsize)`, which
  // parses as `(!inf.gcount()) == LOC_Tsize` and is almost always false,
  // so short reads were silently accepted (same fix on the checks below).
  if (inf.gcount() != LOC_Tsize)
    return false;
  inf.read((char*) &diff, LOC_Tsize);
  if (inf.gcount() != LOC_Tsize)
    return false;
  inf.read((char*) &size, LOC_Tsize);
  if (inf.gcount() != LOC_Tsize)
    return false;
  // use new/delete[] so an exception will be thrown if out of memory.
  unsigned char* buffer = new unsigned char[size];
  inf.read((char*) buffer, size);
  if (inf.gcount() != size) {
    delete[] buffer;  // BUG FIX: the original leaked `buffer` on this path
    resetFree();
    return false;
  }
  // this should be big enough for the decompressed ints
  begin = new lemur::api::LOC_T[(size*4)/sizeof(lemur::api::LOC_T)];
  // decompress it
  int len = lemur::utility::RVLCompress::decompress_ints(buffer, (int *)begin, size);
  size = size*4;
  if (len * LOC_Tsize > size)
    cerr << "RVLDecompress in DocList buffer overrun!" << endl;
  lastid = begin + diff;
  end = begin + len;
  freq = lastid+1;
  deltaDecode();
  READ_ONLY = false;
  delete[](buffer);
  return true;
}
// Converts a stream of channel packets into beamformer-style packets:
// reads packets in pairs, merges their 16-bit samples into 8-bit complex
// data, patches the header fields, and writes the result to stdout.
// Runs until EOF on the input stream; exits the process on a short read
// or a bad endian marker.
void convertchan2beam( ifstream& datastrm)
{
	ostringstream ossmsg;
	int pktSize = chanpkt1.getDataSize() + sizeof(beamHdr);
	for (;;)
	{
		datastrm.read((char *) &chanpkt1, pktSize);
		if (datastrm.gcount() != pktSize)
		{
			if (datastrm.eof())
				return; // EOF should occur on this read
			// partial packet: log the byte count and abort
			ossmsg << "(1) unexpected data file read count " << dec << datastrm.gcount();
			mylog(ossmsg);
			exit(EXIT_FAILURE);
		}
		chanpkt1.marshall();
		// copy the first packet's header into the output header
		memcpy( reinterpret_cast<void *>(&beamHdr), reinterpret_cast<void *>(&chanpkt1), sizeof(beamHdr)) ;
		// packets arrive in pairs; the second must always be present
		datastrm.read((char *) &chanpkt2, pktSize);
		if (datastrm.gcount() != pktSize)
		{
			ossmsg << "(2) unexpected data file read count " << dec << datastrm.gcount();
			mylog(ossmsg);
			exit(EXIT_FAILURE);
		}
		chanpkt2.marshall();
		// merge both packets' 16-bit samples into 8-bit complex output data
		short2char(reinterpret_cast<signed char *>(beampkt.getData()), reinterpret_cast <signed short *>(chanpkt1.getData()), reinterpret_cast <signed short *>(chanpkt2.getData()), chanpkt1.getDataSize());
		// sanity check for 0xaabbccdd endian order value
		if (beamHdr.order != ATADataPacketHeader::CORRECT_ENDIAN)
		{
			mylog("output packet header does not contain 0xaabbccdd endian value");
			exit(EXIT_FAILURE);
		}
		// fix some of the fields to make packets look like they came
		// from the beamformer
		beamHdr.src = ATADataPacketHeader::BEAM_104MHZ; // not really
		beamHdr.chan = 1;
		beamHdr.seq = sequence_num++;
		beamHdr.len = 2048;
		memcpy( reinterpret_cast<void *>(&beampkt), reinterpret_cast<void *>(&beamHdr), sizeof(beamHdr));
		// write the SonATA channelizer compatible packet to stdout
		cout.write((char *) &beampkt, pktSize);
	}
}
void ConcatenateAllReads(char * array, ifstream& file) { int bufsize = 200000000; char* buff = new char[bufsize]; long counter = 0; file.seekg(0, ios::beg); while(!file.eof()) { cerr<<"begin read"<<endl; file.read(buff,bufsize); int numberofCharRead = file.gcount(); cerr<<"end read"<<endl; for(int i = 0 ; i < numberofCharRead ; i++) { if(buff[i] == 'A' || buff[i] == 'T' || buff[i] == 'G' || buff[i] == 'C' ) { array[counter] = buff[i]; counter++; } } cerr<<"end filter "<<endl; } return; }
// Trimite chunk de fisier void send_file_piece(char *buffer, int cli_sock, ifstream& file) { unsigned int i; bool found = false; for (i = 0; i < clienti.size(); ++i) if (clienti[i].sock == cli_sock) { found = true; cerr << "FOUNDDDD \n"; break; } char bufbig[BUFFILE]; memset(bufbig, 0, BUFFILE); if(found && clienti[i].size_fis) { file.read(bufbig, BUFFILE); clienti[i].size_fis -= file.gcount(); cerr << "Remaining " << clienti[i].size_fis << " bytes to transfer\n"; int n = send(cli_sock, bufbig, sizeof(bufbig), 0); send_verify(n); parse_recv_file(buffer, cli_sock); } }
/*
 * Reads up to `count` records from the input file into `data`.
 * Input is consumed in 4 KB pages (40 fixed-size text records per page),
 * so the number of page reads is (count-1)/40 + 1. Each line holds an
 * integer ID followed by a float price.
 *
 * @param infile file to read
 * @param count record count to read
 * @param data destination array for the parsed records
 * @return number of records actually read
 */
int readDataToInputBuffer(ifstream &infile, int count, Product * data)
{
	char buffer[kPageSize+1];
	int readTimes = (count-1)/40+1;
	int i = 0, j = 0;
	string line;
	for (i = 0; i < readTimes; i++)
	{
		infile.read(buffer, kPageSize);
		streamsize got = infile.gcount();
		if (got == 0) // end of file
		{
			// BUG FIX: return the records parsed so far instead of 0,
			// matching the documented "records actually read" contract.
			return j;
		}
		// BUG FIX: terminate at the number of bytes actually read; the
		// original only terminated at buffer[kPageSize], so a short final
		// read re-parsed stale bytes left over from the previous page.
		buffer[got] = '\0';
		istringstream istrstreamOfBuffer(buffer);
		while (getline(istrstreamOfBuffer, line))
		{
			istringstream istrstream(line);
			int ID;
			float price;
			istrstream >> ID >> price; // get ID, price
			data[j].setId(ID);
			data[j].price = price;
			j++;
		}
	}
	return j;
}
/**
 * Scans the whole stream and appends every nucleotide character
 * (A, T, G or C) to `array`, skipping all other bytes.
 * `array` must be large enough for every nucleotide in the file; no
 * terminator is written.
 */
void ConcatenateAllReads(char* array, std::ifstream& file)
{
	const int bufsize = 200000000;	// 200 MB scratch read buffer
	char* buff = new char[bufsize];
	long counter = 0;
	std::cout << "begin read" << std::endl;
	while (!file.eof())
	{
		file.read(buff, bufsize);
		const long numberofCharRead = file.gcount();
		for (long i = 0; i < numberofCharRead; i++)
		{
			const char c = buff[i];
			if (c == 'A' || c == 'T' || c == 'G' || c == 'C')
			{
				array[counter] = c;
				counter++;
			}
		}
	}
	// BUG FIX: the original used `delete buff` on memory obtained with
	// new[]; that is undefined behavior — array delete[] is required.
	delete[] buff;
	std::cout << "end filter " << std::endl;
}
// Reads one raw (byte-for-byte) Player record from the stream.
// `player` is only modified when a full record was read successfully;
// returns whether the read succeeded. The stream must be readable on entry.
bool readPlayerFromBinary(Player& player, ifstream& input)
{
	assert(input.good());
	Player temp;
	input.read(reinterpret_cast<char*>(&temp), sizeof(temp));
	const bool ok = input.good() && input.gcount() == sizeof(temp);
	if (ok)
		player = temp;
	return ok;
}
// Returns the total size of the stream in bytes by counting how many
// characters ignore() can consume from the start, then restores the
// caller's read position and stream state.
size_t FileSize(std::ifstream& is)
{
	const std::streampos pos = is.tellg();	// remember caller's position
	is.clear();				// reset any eof/fail flags first
	// BUG FIX: the original called is.seekg(is.beg), which only rewinds
	// because the seekdir enum happens to convert to offset 0; rewind
	// explicitly instead.
	is.seekg(0, std::ios::beg);
	is.ignore(std::numeric_limits<std::streamsize>::max());	// consume to EOF, counting
	const size_t size = static_cast<size_t>(is.gcount());
	is.clear();				// clear EOF (set by ignore)
	is.seekg(pos);				// restore caller's position
	return size;
}
bool sfheader :: readSndHeader(ifstream &f, const char *name) {
	// Parses a Sun/NeXT ".snd" (AU) header: verifies the magic, initializes
	// the header fields via init(), validates the PCM encoding, then reads
	// the annotation text that sits between the fixed header and the data.
	unsigned char tmp[24];
	// BUG FIX: istream::read() takes char*; passing unsigned char* directly
	// is ill-formed in conforming C++, so cast explicitly.
	f.read(reinterpret_cast<char*>(tmp), 24);
	if (!assertWarning(f.gcount() == 24,"sfheader: reading header") ||
			!assertWarning(strncmp((char*)tmp, ".snd", 4) == 0,
					"sfheader: not .snd file"))
		return false;
	// header words at +4, +8, +12, +16, +20 feed init(); presumably
	// offset/size/encoding/rate/channels per the AU layout — confirm
	init(sRead32BE(tmp+8) / (sizeof(audioSample) * sRead32BE(tmp+20)),
			sRead32BE(tmp+16), sRead32BE(tmp+12), sRead32BE(tmp+20),
			0, 0, name);
	if (!assertWarning(format==SUN_LINEAR_PCM, "sfheader: PCM format expected"))
		return false;
	// annotation field length = (data offset) - fixed 24-byte header
	textlength = sRead32BE(tmp+4) - 24;
	text = new char[textlength+1];
	text[textlength] = 0;	// keep it NUL-terminated
	f.read(text,textlength);
	if (!assertWarning(f.gcount() == textlength,"sfheader: reading text field"))
		return false;
	return true;
} // readSndHeader()
// Reads up to MAX_CHAR_IN bytes from `fin` into the shared global buffer,
// NUL-terminates it at the number of bytes actually read, copies the text
// into `strBuf`, and resets the global line counter.
void _GetStream(ifstream & fin, string & strBuf)
{
	fin.read(buf, MAX_CHAR_IN);
	const int bytesRead = fin.gcount();
	buf[bytesRead] = 0;	// terminate at the actual read length
	strBuf.clear();
	strBuf = buf;
	strlcnt = 0;
}
// Reads one uncompressed inverted-list record (uid, df, diff, size, then
// size LOC_T entries) from the stream and rebuilds the lastid/end/freq
// pointers. Returns false on EOF or short read.
bool lemur::index::InvDocList::binRead(ifstream& inf) {
  if (inf.eof())
    return false;
  int diff;
  inf.read((char*) &uid, sizeof(lemur::api::TERMID_T));
  if (!(inf.gcount() == sizeof(lemur::api::TERMID_T)))
    return false;
  inf.read((char*) &df, LOC_Tsize);
  // BUG FIX: the original wrote `if (!inf.gcount() == LOC_Tsize)`, which
  // parses as `(!inf.gcount()) == LOC_Tsize` and is almost always false,
  // so short reads were silently accepted (same fix on the checks below).
  if (inf.gcount() != LOC_Tsize)
    return false;
  inf.read((char*) &diff, LOC_Tsize);
  if (inf.gcount() != LOC_Tsize)
    return false;
  inf.read((char*) &size, LOC_Tsize);
  if (inf.gcount() != LOC_Tsize)
    return false;
  int s = sizeof(lemur::api::LOC_T)*size;
  // use new/delete[] so an exception will be thrown if out of memory.
  begin = new lemur::api::LOC_T[s/sizeof(lemur::api::LOC_T)];
  inf.read((char*) begin, s);
  if (inf.gcount() != s) {
    resetFree();
    return false;
  }
  lastid = begin + diff;
  end = begin + size;
  freq = lastid+1;
  READ_ONLY = false;
  return true;
}
// Reads the next 4 bytes from the stream and assembles them into an
// unsigned int (byte ordering is handled by getIntInRightOrder).
// Throws runtime_error if fewer than 4 bytes could be read.
unsigned int BMPReader::readInt(ifstream &file)
{
	char buf[] = "0000";
	file.read(buf, 4);
	const bool readAllFour = (file.gcount() == 4);
	if (!readAllFour)
		throw runtime_error("111 File not read");
	return getIntInRightOrder(buf);
}
/* * Send the next chunk of the firmware file */ bool FirmwareTransferer::SendNextChunk() { uint8_t page[FLASH_PAGE_LENGTH]; m_firmware->read(reinterpret_cast<char*>(page), FLASH_PAGE_LENGTH); std::streamsize size = m_firmware->gcount(); if (!size) { m_sucessful = true; cout << endl; return true; } cout << "."; fflush(stdout); return m_widget->SendMessage(FLASH_PAGE_LABEL, page, size); }
/*
 * Computes an MD5 digest from three samples of the file — the head, the
 * middle and the tail (up to MD5_DATA_LEN bytes each) — and appends the
 * digest to `md5` as lowercase hex. `size` is the total file size in bytes.
 */
bool do_md5(ifstream &is, unsigned long long size, string &md5)
{
	MD5_CTX ctx;
	MD5_Init(&ctx);
	char tmp[MD5_DATA_LEN] = {0};

	// head sample
	is.read(tmp, sizeof(tmp));
	unsigned long ret = is.gcount();
	MD5_Update(&ctx, tmp, ret);

	// middle sample
	// BUG FIX: compute the offset in signed arithmetic; (size - 1) is
	// unsigned, so size == 0 wrapped to a huge value before the old cast
	// to long, making the m < 0 guard unreliable.
	long long m = (static_cast<long long>(size) - 1) / 2;
	if (m < 0)
		m = 0;
	// BUG FIX: clear eof/fail flags before seeking; after a short head
	// read (file smaller than MD5_DATA_LEN) the stream is failed and
	// seekg/read below would silently do nothing.
	is.clear();
	is.seekg(m, ios_base::beg);
	is.read(tmp, sizeof(tmp));
	ret = is.gcount();
	MD5_Update(&ctx, tmp, ret);

	// tail sample (same signed-offset and clear() fixes as above)
	m = static_cast<long long>(size) - MD5_DATA_LEN;
	if (m < 0)
		m = 0;
	is.clear();
	is.seekg(m, ios_base::beg);
	is.read(tmp, sizeof(tmp));
	ret = is.gcount();
	MD5_Update(&ctx, tmp, ret);

	unsigned char _md5[MD5_DIGEST_LENGTH] = {0};
	MD5_Final(_md5, &ctx);
	// render the digest as lowercase hex
	for (int i = 0; i < MD5_DIGEST_LENGTH; i++)
	{
		unsigned char c = _md5[i];
		int k1 = c >> 4;
		int k2 = c & 0xF;
		md5 += k1 >= 10 ? 'a' + (k1-10) : k1 - 0 + '0';
		md5 += k2 >= 10 ? 'a' + (k2-10) : k2 - 0 + '0';
	}
	return true;
}
// Returns the next byte of the input file through `a`, refilling the
// 2,000,000-byte member buffer from the stream when it runs dry.
// On end of input, sets the buffereof flag and returns 0 in `a`.
void locfile::getcomp(uint8_t &a)
{
	if (bufferi == buffern)
	{
		// buffer exhausted: refill from the stream
		in.read((char *)buffer, sizeof(uint8_t)*2000000);
		bufferi = 0;
		buffern = (mylong)in.gcount();
		if (buffern == 0)
		{
			// nothing more to read: flag EOF and hand back 0
			buffereof = 1;
			a = 0;
			return;
		}
	}
	a = buffer[bufferi++];
}
// vrushta map s chestotite na simvolite map <char, unsigned int> frequency_map(ifstream& in) { map <char, unsigned int> freqmap; if (!in) { cout << "could not open for reading " << endl; } char buffer[buf_size]; while(!in.eof()) { in.read(buffer,buf_size); for (int i=0;i<in.gcount();i++) { freqmap[buffer[i]]++; } return freqmap; } }
//namira chestotite na vseki simvol po daden vhoden fail i map void frequency_for_char(ifstream &in, map<char, unsigned int> & freq) { if(!in) { cout << "could not open for reading" << endl; } char buffer[buf_size]; while( !in.eof()) { in.read(buffer,buf_size); for(int i=0;i<in.gcount();i++) { freq[buffer[i]]++; } } in.close(); }
// Streams the file to the client in MAXMESGDATA-sized chunks, yielding the
// CPU after every qShare_ chunks. A msg_flag of -1 marks the chunk read at
// end-of-file. Returns false as soon as a Write() fails.
bool ServerProcess::Respond(ifstream& file)
{
	char msg_str[MAXMESGDATA];
	while (!file.eof())
	{
		int remaining = qShare_;	// chunks allowed in this scheduling slice
		while (remaining-- > 0 && !file.eof())
		{
			file.read(msg_str, MAXMESGDATA);
			const int msg_flag = file.eof() ? -1 : 0;	// -1 flags the final chunk
			if (!this->Write(msg_str, file.gcount(), msg_flag, client_pid_))
				return false;
		}
		sched_yield();	// give other processes a turn between slices
	}
	return true;
}
// Compresses `infilesize` bytes of `infile` block by block with bzip2
// (blocks of at most `blockSize` bytes) and writes the compressed blocks
// to `outfile`. Returns 0 on success, 1 on any error.
int do_seq_compress(ifstream & infile, ofstream & outfile, long infilesize, int blockSize)
{
	long leftbytes = infilesize;
	while (leftbytes > 0)
	{
		// 1. read up to one block from the input file
		int toreadsize = leftbytes > blockSize ? blockSize : leftbytes;
		char *readbuf = new (std::nothrow) char[toreadsize];
		if (readbuf == nullptr)
		{
			cout<<" allocate error!"<<endl;
			return 1;
		}
		infile.read(readbuf, toreadsize);
		streamsize readsize = infile.gcount();
		if (readsize != toreadsize)
		{
			cout<<"read error! aborting..."<<endl;
			delete[] readbuf;	// BUG FIX: was leaked on this path
			return 1;
		}
		// bzip2's worst case needs input size + ~1% + 600 bytes; 6 KB slack is ample
		unsigned int compresssize = toreadsize + 6*1024;
		char *cmpbuf = new (std::nothrow) char[compresssize];
		if (cmpbuf == nullptr)
		{
			cout <<" allocate error!"<<endl;
			delete[] readbuf;	// BUG FIX: was leaked on this path
			return 1;
		}
		// 2. compress
		int ret = BZ2_bzBuffToBuffCompress(cmpbuf, &compresssize, readbuf, readsize, BWTblockSize, 0, 30);
		if (ret != BZ_OK)
		{
			cout<<"error during compressing, aborting..."<<endl;
			delete[] readbuf;	// BUG FIX: was leaked on this path
			delete[] cmpbuf;
			return 1;
		}
		// 3. write the compressed block
		outfile.write(cmpbuf, compresssize);
		leftbytes = leftbytes - readsize;
		// BUG FIX: the original never freed either buffer, leaking
		// (blockSize + blockSize + 6 KB) bytes on EVERY loop iteration.
		delete[] readbuf;
		delete[] cmpbuf;
	}
	return 0;
}
ifstream ifs(filename.c_str()); for (unsigned j = 0; j < _auxFeatures.size(); j++) { ifs >> _auxFeatures[j][i]; DRWN_ASSERT_MSG(!ifs.fail(), "expecting " << _auxFeatures.size() << " features from file text " << filename << " but only read " << j); } ifs.close(); } else if (ext.compare("bin") == 0) { // 32-bit float ifstream ifs(filename.c_str(), ifstream::binary); vector<float> buffer(_auxFeatures.size()); ifs.read((char *)&buffer[0], _auxFeatures.size() * sizeof(float)); DRWN_ASSERT_MSG(!ifs.fail(), "expecting " << _auxFeatures.size() << " features from binary file " << filename << " but only read " << ifs.gcount() / sizeof(float)); ifs.close(); for (unsigned j = 0; j < _auxFeatures.size(); j++) { DRWN_ASSERT_MSG(isfinite(buffer[j]), j << "-th feature is not finite"); _auxFeatures[j][i] = (double)buffer[j]; } } else { DRWN_LOG_FATAL("unknown extension " << AUX_FEATURE_EXT[i]); } } } DRWN_LOG_DEBUG("...allocated " << (_filters.memory() / (1024 * 1024)) << "MB for filter responses and " << (_auxFeatures.size() * AUX_FEATURE_EXT.size() * sizeof(double) / (1024 * 1024)) << "MB for auxiliary features");
// Reads the operator list from the stream: a count line followed by one
// line per operator, keyed by a leading type character (D/I/F/N/S/C).
// Populates the per-type vectors and executionOrder_. Returns false on a
// truncated file; calls error() on a malformed line.
bool MlOperatorList::readOperatorList(ifstream& ifs)
{
	char buffer[1024];
	// skip leading comment lines (starting with '#')
	while (ifs.good() && ifs.getline(buffer,1024))
		if (ifs.gcount()>0 && buffer[0] != '#')
			break;

	// BUG FIX: the original did sscanf(buffer,"%d",&n) with n declared as
	// size_t — a %d conversion into a size_t* is undefined behavior on any
	// platform where sizeof(size_t) != sizeof(int) (e.g. LP64).
	unsigned int numOperators = 0;
	if (sscanf(buffer,"%u",&numOperators) != 1)
		error("expected line with number of operaotrs");
	const size_t n = numOperators;

	executionOrder_.clear();
	executionOrder_.reserve(n);
	drops_.clear();
	indicators_.clear();
	normalizations_.clear();
	functions_.clear();
	splits_.clear();
	conditionals_.clear();

	for (size_t i=0; i<n; i++)
	{
		if (! ifs.good())
			return false;
		ifs.getline(buffer,1024);
		if (ifs.gcount()<=0)
			return false;

		istringstream iss(buffer);
		char type=' ';
		iss >> type;
		switch (type)
		{
			case 'D': // drop operator: <featureIdx>
			{
				size_t dropIdx=0;
				iss >> dropIdx;
				if (iss.fail())
					error("Error reading line:",buffer);
				executionOrder_.push_back(IdxPair(OT_DROP,drops_.size()));
				drops_.push_back(dropIdx);
			}
			break;

			case 'I': // indicator operator: <sourceIdx> <targetIdx>
			{
				IndicatorOperator iop;
				iss >> iop.sourceIdx >> iop.targetIdx;
				if (iss.fail())
					error("Error reading line:",buffer);
				executionOrder_.push_back(IdxPair(OT_INDICATOR,indicators_.size()));
				indicators_.push_back(iop);
				break;
			}

			case 'F': // function operator: <sourceIdx> <targetIdx> <typeLabel>
			{
				FunctionOperator fop;
				iss >> fop.sourceIdx >> fop.targetIdx;
				string typeStr;
				iss >> typeStr;
				size_t idx;
				for (idx=0; idx<numConditionalValueLabels; idx++)
					if (! strcmp(typeStr.c_str(),conditionalValueLabels[idx]))
						break;
				if (idx == numConditionalValueLabels)
					error("Error reading line:",buffer);
				fop.type = idx;
				executionOrder_.push_back(IdxPair(OT_FUNCTION,functions_.size()));
				functions_.push_back(fop);
			}
			// BUG FIX: the original had no break here, so every 'F' line
			// fell through into case 'N' and tried to parse a bogus
			// NormalizationOperator from the already-consumed stream.
			break;

			case 'N': // normalization operator: <sourceIdx> <targetIdx> <mu> <sigma>
			{
				NormalizationOperator nop;
				iss >> nop.sourceIdx >> nop.targetIdx >> nop.mu >> nop.sigma;
				if (iss.fail())
					error("Error reading line:",buffer);
				executionOrder_.push_back(IdxPair(OT_NORMALIZATION,normalizations_.size()));
				normalizations_.push_back(nop);
			}
			break;

			case 'S': // split operator: <sourceIdx> <k> <k thresholds> <k+1 value idxs> <k+1 indicator idxs>
			{
				SplitOperator sop;
				iss >> sop.sourceIdx;
				size_t numThresholds;
				iss >> numThresholds;
				if (iss.fail())
					error("Error reading line:",buffer);
				sop.thresholds.resize(numThresholds);
				sop.indexesForBinValues.resize(numThresholds+1);
				sop.indexesForBinIndicators.resize(numThresholds+1);
				for (size_t t=0; t<numThresholds; t++)
					iss >> sop.thresholds[t];
				for (size_t t=0; t<=numThresholds; t++)
				{
					// the model file uses -1 for MAX_UINT (so it won't hurt my eyes...)
					int index;
					iss >> index;
					sop.indexesForBinValues[t] = (index>=0 ? static_cast<size_t>(index) : MAX_UINT);
				}
				for (size_t t=0; t<=numThresholds; t++)
				{
					int index;
					iss >> index;
					sop.indexesForBinIndicators[t] = (index>=0 ? static_cast<size_t>(index) : MAX_UINT);
				}
				if (iss.fail())
					error("Error reading line:",buffer);
				executionOrder_.push_back(IdxPair(OT_SPLIT,splits_.size()));
				splits_.push_back(sop);
			}
			break;

			case 'C': // conditional operator
			{
				ConditionalOperator cod;
				size_t numSources;
				iss >> numSources;
				cod.sourceIdxs.resize(numSources);
				for (size_t s=0; s<numSources; s++)
					iss >> cod.sourceIdxs[s];
				iss >> cod.targetIdx;
				// the model file uses -1 for MAX_UINT (so it won't hurt my eyes...)
				int indexForBool;
				iss >> indexForBool;
				cod.indexForBool = (indexForBool>=0 ? static_cast<size_t>(indexForBool) : MAX_UINT);
				string resultStr, conditionStr;
				iss >> conditionStr >> resultStr;
				if (iss.fail())
					error("Error reading line:",buffer);
				size_t idxType, idxResult;
				for (idxType=0; idxType<numConditionalOperatorLabels; idxType++)
					if (! strcmp(conditionStr.c_str(),conditionalOperatorLabels[idxType]))
						break;
				if (idxType == numConditionalOperatorLabels)
					error("Error reading line:",buffer);
				for (idxResult=0; idxResult<numConditionalValueLabels; idxResult++)
					if (! strcmp(resultStr.c_str(),conditionalValueLabels[idxResult]))
						break;
				if (idxResult == numConditionalValueLabels)
					error("Error reading line:",buffer);
				cod.conditionType = idxType;
				cod.resultType = idxResult;
				executionOrder_.push_back(IdxPair(OT_CONDITIONAL,conditionals_.size()));
				conditionals_.push_back(cod);
			}
			break;
		};
	}
	return true;
}
// Reads the next standard-format-marker field ("\marker contents") from the
// interlinear file, splitting it into `marker` (without the backslash) and
// `contents`. A field may span several physical lines; a truly blank line is
// reported as a synthetic "BLANKLINE" marker on the NEXT call (latched in
// bNextLineIsBlank). Returns FALSE at end of file or on a malformed field;
// throws a CString message if a line exceeds the fixed buffer size.
// NOTE(review): relies on the legacy (pre-standard) iostream `eatwhite()`
// member and MFC CString buffer juggling — left byte-identical.
BOOL static getField(ifstream& fin, CString& marker, CString& contents)
{
	static BOOL bNextLineIsBlank=FALSE;
	// a blank line detected on the previous call is delivered now
	if(bNextLineIsBlank)
	{
		bNextLineIsBlank=FALSE;
		marker = "BLANKLINE";
		contents ="";
		return TRUE;
	}
	ASSERTX(fin.is_open());
	const int kBuffSize = 5000; // CURRENTLY MAX FIELD SIZE TOO!
	CString sField;
	LPTSTR buff = sField.GetBuffer(kBuffSize+2);
	LPTSTR start_buff = buff;
	fin.eatwhite();
	char* b = buff;
	BOOL bFoundBlank = FALSE;
	// accumulate physical lines into buff until the next field ('\\') starts
	do
	{
		fin.getline(b, kBuffSize - (b-buff), '\n');
		b += fin.gcount();
		if(fin.gcount())
		{
			*(b-1) = '\n'; // put a carriage return in place of the null terminator
			*b = '\0'; // null terminate in case the next getline fails
		}
		while(fin.peek() == '\n') // keep swallowing blank lines until we see what's at the end of them
		{
			fin.get(); // swallow the \n;
			if(fin.peek() == '\\') // if the blanks are followed by a record, then this constitutes a true blank line
			{
				bFoundBlank=TRUE;
				break;
			}
		}
	} while (!bFoundBlank && fin.good() && fin.gcount() && fin.peek() != '\\');
	bNextLineIsBlank = bFoundBlank; // will be used on the next call
	if( (kBuffSize-1) <= (b-buff)) // to long (and thus fin.gcount() == 0)
	{
		CString s;
		s.Format("The interlinear file appears to have a line which is longer than the maximum of %d characters which csCleanITX can handle.\n", kBuffSize);
		throw(s);
	}
	if(!buff[0]) // end of file
	{
		sField.ReleaseBuffer(-1);
		return FALSE;
	}
	// eat white space before the SFM Code (will always be there after a \dis)
	while(*buff && _ismbcspace(*buff))
	{
		*buff='~'; // a hack so that iSpaceLoc, below, isn't set to the space between the "\dis" and marker
		++buff;
	}
	int iSpaceLoc ; // figure out where the marker ends and field contents begin
	iSpaceLoc = sField.FindOneOf(" \t\n");
	if(iSpaceLoc <= 1)// [0] should be the slash, [1] at least one char
	{
		sField.ReleaseBuffer(-1);
		return FALSE;
	}
	// split the buffer in place: marker before the separator, contents after
	start_buff[iSpaceLoc] = '\0';
	marker = buff + 1; // +1 to skip the slash
	marker.TrimRight();
	contents = start_buff + iSpaceLoc+1;
	contents.TrimLeft();
	contents.TrimRight();
	sField.ReleaseBuffer(-1);
	return TRUE;
}
// Converts a stream of channel packets into beamformer-style packets:
// reads packets in pairs, tracks sequence gaps / missing packets, merges
// the two packets' 16-bit samples into 8-bit complex data, patches the
// header fields, and writes the result to stdout. Runs until EOF on the
// input stream; exits the process (after logging packet statistics) on a
// short read or a bad endian marker.
void convertchan2beam( ifstream& datastrm)
{
	ostringstream ossmsg;
	int pktSize = chanpkt1.getDataSize() + sizeof(beamHdr);
	for (;;)
	{
		datastrm.read((char *) &chanpkt1, pktSize);
		if (datastrm.gcount() != pktSize)
		{
			if (datastrm.eof())
				return; // EOF should occur on this read
			// partial packet: log the count plus summary stats, then abort
			ossmsg << "(1) unexpected data file read count " << dec << datastrm.gcount();
			mylog(ossmsg);
			ossmsg << "Packets: " << packetCount << endl << seqGaps << " gap(s) in sequence; " << totalMissedPackets << " Total Missed Packets" << endl;
			mylog(ossmsg);
			exit(EXIT_FAILURE);
		}
		chanpkt1.marshall();
		// Copy the header into the output buffer
		memcpy( reinterpret_cast<void *>(&beamHdr), reinterpret_cast<void *>(&chanpkt1), sizeof(beamHdr)) ;
		// first packet seen: seed the expected sequence numbers
		if (packetCount == 0)
		{
			expected[0] = expected[1] = beamHdr.seq;
		}
		// Check for missing packets
		checkForMissingPackets(&beamHdr );
		datastrm.read((char *) &chanpkt2, pktSize);
		if (datastrm.gcount() != pktSize)
		{
			ossmsg << "(2) unexpected data file read count " << dec << datastrm.gcount();
			mylog(ossmsg);
			ossmsg << "Packets: " << packetCount << endl << seqGaps << " gap(s) in sequence; " << totalMissedPackets << " Total Missed Packets" << endl;
			mylog(ossmsg);
			exit(EXIT_FAILURE);
		}
		// Read the next packet
		chanpkt2.marshall();
		memcpy( reinterpret_cast<void *>(&chanHdr), reinterpret_cast<void *>(&chanpkt2), sizeof(chanHdr)) ;
		// Check for missing packets
		checkForMissingPackets( &chanHdr );
		// Convert both packets' data to 8 bit complex and store in
		// output buffer.
		short2char(reinterpret_cast<signed char *>(beampkt.getData()), reinterpret_cast <signed short *>(chanpkt1.getData()), reinterpret_cast <signed short *>(chanpkt2.getData()), chanpkt1.getDataSize());
		// sanity check for 0xaabbccdd endian order value
		if (beamHdr.order != ATADataPacketHeader::CORRECT_ENDIAN)
		{
			mylog("output packet header does not contain 0xaabbccdd endian value");
			ossmsg << "Packets: " << packetCount << endl << seqGaps << " gap(s) in sequence; " << totalMissedPackets << " Total Missed Packets" << endl;
			mylog(ossmsg);
			exit(EXIT_FAILURE);
		}
		// fix some of the fields to make packets look like they came
		// from the beamformer
		beamHdr.src = ATADataPacketHeader::BEAM_104MHZ; // not really
		beamHdr.chan = 1;
		beamHdr.seq = sequence_num++;
		beamHdr.len = 2048;
		memcpy( reinterpret_cast<void *>(&beampkt), reinterpret_cast<void *>(&beamHdr), sizeof(beamHdr));
		// write the SonATA channelizer compatible packet to stdout
		cout.write((char *) &beampkt, pktSize);
	}
}
/* Process the requests from the clients, and return the feedback.
 * `type` selects between the FTP control channel (command parsing:
 * USER/PASS/LIST/PUT/GET/QUIT) and the data channel (chunked file
 * upload/download framed by a one-byte command prefix in p_cmd[0]). */
string process_request(char* p_cmd, int sock, int type)
{
	string rt;
	string rcmd; // REAL CMD
	string args;
	if (type == FTP_CONTROL_TYPE)
	{
		cout << FTP_LOG_HEAD << " Get a control cmd: " << p_cmd << ", at " << sock << endl;
		// split the raw command into the verb (rcmd) and its argument (args)
		int i;
		for (i = 0; i<CMD_MAX_LENGTH; i++)
		{
			if (p_cmd[i] != ' ' && p_cmd[i] != '\0' && p_cmd[i] != '\n')
				rcmd += p_cmd[i];
			else
				break;
		}
		for (int j = i+1; j<CMD_MAX_LENGTH; j++)
		{
			// NOTE(review): `p_cmd[i]` below looks like a typo for `p_cmd[j]`;
			// as written it re-tests the same separator byte on every
			// iteration, so a trailing '\n' ends up inside args — confirm.
			if (p_cmd[j] != '\0' && p_cmd[i] != '\n')
				args += p_cmd[j];
			else
				break;
		}
		//cout << "REAL CMD:" << rcmd << ", args:" << args << endl;
		if (rcmd == FTP_CMD_USER)
		{
			/* Request a password */
			rt = FTP_RSP_R_PASS;
			strcpy(clients[sock].user, rcmd.c_str());
		}
		else if (rcmd == FTP_CMD_PASS)
		{
			/* Get the user, then match them. */
			/* In the future, this should be put into the database. */
			if (args == "super123")
			{
				clients[sock].is_logged = true;
				clients[sock].set_password(args.c_str());
				rt = FTP_RSP_P_PASS;
			}
			else
				rt = FTP_RSP_E_PASS;
		}
		else if (rcmd == FTP_CMD_LIST)
		{
			// directory listing: one "<type> <name> ... <size>" row per entry
			if (clients[sock].is_logged)
			{
				rt = FTP_RSP_LIST;
				DIR* dir;
				struct dirent* ptr;
				dir = opendir(FTP_DIR);
				while ((ptr = readdir(dir)) != NULL)
				{
					/* Strcmp will return a true value while dismatching. */
#ifdef __linux
					if (!strcmp(ptr->d_name, "."))
						continue;
					else if (!strcmp(ptr->d_name, ".."))
						continue;
					struct stat file_stat;
					string file_path(FTP_DIR);
					file_path += ptr->d_name;
					stat(file_path.c_str(), &file_stat);
					rt += ptr->d_type;
					rt += " ";
					rt += ptr->d_name;
					string temp;
					temp += ptr->d_type;
					temp += " ";
					temp += ptr->d_name;
					// TODO-- This place, the file name may be greater than 50 place.
					for (int pos = temp.length(); pos<40; pos++)
						rt += " ";
					rt += byte2std(file_stat.st_size);
					rt += "\n";
#endif
				}
				closedir(dir);
			}
			else
				rt = FTP_RSP_R_LOG;
		}
		else if (rcmd == FTP_CMD_PUT)
		{
			// upload: remember the target name and open the output stream
			if (clients[sock].is_logged)
			{
				rt = FTP_RSP_RF_START;
				string file_path(FTP_DIR);
				file_path += args;
				//strcpy(file_name[sock], args.c_str()); --real use
				strcpy(file_name_temp, args.c_str());
				//ofs[sock].open(file_path.c_str(), ios::binary);
				ofs_temp.open(file_path.c_str(), ios::binary);
			}
			else
				rt = FTP_RSP_R_LOG;
		}
		else if (rcmd == FTP_CMD_GET)
		{
			// download: remember the source name and open the input stream
			if (clients[sock].is_logged)
			{
				rt = FTP_RSP_SF_START;
				string file_path(FTP_DIR);
				file_path += args;
				//strcpy(file_name[sock], args.c_str());
				strcpy(file_name_temp, args.c_str());
				//ifs[sock].open(file_path.c_str(), ios::binary);
				// TODO--judge whether the file exists.
				ifs_temp.open(file_path.c_str(), ios::binary);
			}
			else
				rt = FTP_RSP_R_LOG;
		}
		else if (rcmd == FTP_CMD_QUIT)
		{
			rt = FTP_RSP_BYE;
		}
		else
			rt = FTP_RSP_ERR;
		return rt;
	}
	else
	{
		// data channel: p_cmd = buffer, p_cmd[0] carries the framing byte
		if (p_cmd[0] == FTP_FTR_CMD_CONTINUE)
		{
			// receive-file chunk: append payload bytes to the open file
			rt = FTP_FTR_CMD_CONTINUE;
			cout << FTP_LOG_HEAD << "Write into " << file_name_temp << ", size of " << strlen(p_cmd)-1 << endl;
			/* More comman way, but it need to be tested for binary and ascii! */
			int b_len = strlen(p_cmd);
			for (int d = 1; d<b_len; d++)
				ofs_temp.put(p_cmd[d]);
		}
		else if (p_cmd[0] == FTP_FTR_CMD_ENDALL)
		{
			// receive-file done: reset state and close the output stream
			rt = FTP_FTR_CMD_ENDALL;
			// NOTE(review): p_cmd is a pointer, so sizeof(p_cmd) is the
			// pointer size (4/8), not the buffer length — only the first
			// few bytes get cleared. Probably meant the buffer's size.
			memset(p_cmd, 0, sizeof(p_cmd));
			strcpy(file_name[sock], "");
			strcpy(file_name_temp, "");
			//ofs[sock].close();
			ofs_temp.close();
		}
		/* Sending files would return the file data. */
		else if (p_cmd[0] == FTP_FTS_CMD_CONTINUE)
		{
			/* Just like writing */
			ifs_temp.read(buffer[sock]+1, sizeof(buffer[sock])-1);
			/* Get the file ending, gcount() is not useful for now. */
			// a short read marks the last chunk of the file
			int real_read = ifs_temp.gcount();
			if (real_read < FILE_MAX_BUFFER)
			{
				rt = FTP_FTS_CMD_ENDALL;
				buffer[sock][0] = FTP_FTS_CMD_ENDALL;
			}
			else
			{
				rt = FTP_FTS_CMD_CONTINUE;
				buffer[sock][0] = FTP_FTS_CMD_CONTINUE;
			}
			cout << FTP_LOG_HEAD << "Read from " << file_name_temp << ", size of " << real_read << endl;
			// For future features
			rt.append(p_cmd+1);
		}
		else if (p_cmd[0] == FTP_FTS_CMD_ENDALL)
		{
			// send-file done: reset state and close the input stream
			rt = FTP_FTS_CMD_ENDALL;
			// NOTE(review): same sizeof(pointer) issue as above — confirm
			memset(p_cmd, 0, sizeof(p_cmd));
			strcpy(file_name[sock], "");
			strcpy(file_name_temp, "");
			//ifs[sock].close();
			ifs_temp.close();
		}
		else
		{
			/* Breakpoint resends, and errors handle in the future. */
			rt = FTP_FT_ERR;
		}
		return rt;
	}
}
// Main simulation driver: pulls fixed-size Event records from the event
// fifo, buffers events for cores stalled at a quantum boundary, enforces a
// per-sync-object total order on happens-before events, and dispatches each
// event to the simulator. Returns when the main thread (tid 0) finishes.
// A forced quantum commit is triggered after too many iterations without
// progress to avoid deadlock.
void processEvents(MultiCacheSimulator<RCDCLine, uint64_t>* sim, ifstream& eventFifo)
{
	int currentLiveThreads = 0;
	/** the number of sync events seen thus far (in the trace; these events have not necessarily executed yet) for each sync object. Used to enforce a total order on sink/source events to a given sync object, based on their order of appearance in the event fifo. */
	map<uint64_t, uint64_t> receivedEventsOfSyncObject;
	/** the last source event that was executed for each sync object */
	map<uint64_t, uint64_t> activeEventOfSyncObject;
	// one pending-event queue per simulated core
	vector< deque<Event> > eventBuffers( sim->m_allCaches.size() );
	bool allDone = false;
	bool fifoOpen = true;
	/** the first event buffer to check: we rotate through them to avoid livelock */
	uint64_t firstEventBuffer = 0;
	unsigned iterationsWithoutProgress = 0;
	Event e;
	while ( true ) {
		// safety valve: force a quantum commit if we appear to be stuck
		if ( iterationsWithoutProgress > 100000 ) {
			s_forcedCommits++;
			iterationsWithoutProgress = 0;
			sim->finishQuantumRound();
		}
		// help avoid forced commits by ignoring threads that are finished
		if ( !fifoOpen ) {
			s_unprocessedEvents = 0;
			for ( unsigned i = 0; i < eventBuffers.size(); i++ ) {
				if ( eventBuffers.at(i).empty() ) {
					sim->block( i );
				} else {
					s_unprocessedEvents += eventBuffers.at(i).size();
				}
			}
		}
		// prefer draining per-thread queues over reading from the front-end
		bool tookEventFromLocalBuffer = false;
		/* NB: this code means that we will process a bunch of events for each thread, instead of (more realistically) interleaving events from different threads. */
		for ( unsigned i = 0; i < eventBuffers.size(); i++ ) {
			const unsigned bufferIndex = (i + firstEventBuffer) % eventBuffers.size();
			if ( !sim->stalledAtQuantumBoundary(bufferIndex) && !eventBuffers.at(bufferIndex).empty() ) {
				e = eventBuffers.at(bufferIndex).front();
				eventBuffers.at(bufferIndex).pop_front();
				tookEventFromLocalBuffer = true;
				break;
			}
		}
		firstEventBuffer++;	// rotate the starting buffer to stay fair
		if ( fifoOpen && !tookEventFromLocalBuffer ) {
			// blocking read from fifo
			assert( eventFifo.good() );
			eventFifo.read( (char*) &e, sizeof(Event) );
			const unsigned bytesRead = eventFifo.gcount();
			if ( 0 == bytesRead ) {
				// producer closed the fifo: no more trace events will arrive
				assert( eventFifo.eof() );
				fifoOpen = false;
				iterationsWithoutProgress++;
				continue;
			}
			assert( sizeof(Event) == bytesRead );	// events must arrive whole
			int cpuid = sim->cpuOfTid( e.m_tid );
			// enforce a total order on sync events for a given sync object
			if ( e.m_isLifeLock ) {
				switch ( e.m_type ) {
				case HAPPENS_BEFORE_SOURCE:
				case HAPPENS_BEFORE_SINK: {
					map<uint64_t,uint64_t>::iterator mit = receivedEventsOfSyncObject.find( e.m_syncObject );
					if ( mit == receivedEventsOfSyncObject.end() ) {
						// NB: logical times start at 1
						receivedEventsOfSyncObject[ e.m_syncObject ] = 1;
						e.m_logicalTime = 1;
					} else {
						uint64_t newval = mit->second + 1;
						receivedEventsOfSyncObject[ e.m_syncObject ] = newval;
						e.m_logicalTime = newval;
					}
				}
					break;
				default:
					break;
				}
			}
			// core is stalled, so buffer the event locally
			if ( sim->stalledAtQuantumBoundary(e.m_tid) ) {
				eventBuffers.at( cpuid ).push_back( e );
				iterationsWithoutProgress++;
				continue;
			} else {
				// if core is not stalled, we should have drained its buffer already
				assert( eventBuffers.at(cpuid).empty() );
			}
		}
		// the chosen event's core may still be stalled: retry later
		if( sim->stalledAtQuantumBoundary(e.m_tid) ) {
			iterationsWithoutProgress++;
			continue;
		}
		bool madeProgress = true;
		// event dispatch
		switch ( e.m_type ) {
		case ROI_START:
		case ROI_FINISH:
			// NOPs for now
			break;
		case THREAD_START:
			currentLiveThreads++;
			sim->setLiveThreads( currentLiveThreads );
			s_numSpawnedThreads++;
			s_maxLiveThreads = max( s_maxLiveThreads, currentLiveThreads );
			break;
		case THREAD_FINISH:
			currentLiveThreads--;
			sim->setLiveThreads( currentLiveThreads );
			sim->block( e.m_tid );
			// when main thread exits, tear down simulation
			if ( 0 == e.m_tid ) {
				allDone = true;
			}
			break;
		case THREAD_BLOCKED:
			sim->block( e.m_tid );
			break;
		case THREAD_UNBLOCKED:
			sim->unblock( e.m_tid );
			break;
		case MEMORY_ALLOCATION:
			// NOP for now
			break;
		case MEMORY_FREE:
			// NOP for now
			break;
		case HAPPENS_BEFORE_SOURCE: {
			// may have to wait until it is this event's turn on the sync object
			if ( syncEventCanProceed(e, activeEventOfSyncObject, sim, eventBuffers) ) {
				sim->syncOp( e.m_tid, SYNC_SOURCE, false, INVALID_THREADID, e.m_syncObject );
			} else madeProgress = false;
		}
			break;
		case HAPPENS_BEFORE_SINK: {
			if ( syncEventCanProceed(e, activeEventOfSyncObject, sim, eventBuffers) ) {
				sim->syncOp( e.m_tid, SYNC_SINK, e.m_hbSourceThread != INVALID_THREADID, e.m_hbSourceThread, e.m_syncObject );
			} else madeProgress = false;
		}
			break;
		case MEMORY_READ:
			sim->cacheRead( e.m_tid, e.m_addr, e.m_memOpSize, usesStoreBuffer( e ) );
			break;
		case MEMORY_WRITE:
			sim->cacheWrite( e.m_tid, e.m_addr, e.m_memOpSize, usesStoreBuffer( e ) );
			break;
		case BASIC_BLOCK:
			s_insnsExecuted += e.m_insnCount;
			//if ( (s_insnsExecuted % 5000000) < e.m_insnCount ) {
			//cerr << "[debug] (nd/tso/hb):" << s_knobs.count(KnobNondet) << s_knobs.count(KnobTSO) << s_knobs.count(KnobHB) << " executed " << s_insnsExecuted << " insns" << endl;
			//}
			sim->basicBlock( e.m_tid, e.m_insnCount );
			break;
		case INVALID_EVENT:
		default:
			cerr << e.toString() << endl;
			assert(false);
		}
		iterationsWithoutProgress = madeProgress ? 0 : iterationsWithoutProgress+1;
		if ( allDone ) {
			// there shouldn't be any queued-up events
			s_unprocessedEvents = 0;
			for ( unsigned i = 0; i < eventBuffers.size(); i++ ) {
				s_unprocessedEvents += eventBuffers.at(i).size();
			}
			return;
		}
	} // end while(true)
} // end processEvents()