void prepare_fasta_output_distribution(ifstream& fp_input, ofstream& fp_fasta, ofstream& fp_detail, 
					string& reference, meta_data& meta_details)
{
  	ofstream condense_log;
  	condense_log.open("condense_log.tsv", ofstream::out);

        vector<map<char,int>> mutationStats;

        string read_name, cigar, alignment, quality, len;
	string alignment_string, quality_score, detail;
	int total_score, average_score, opt, reflength;
	int i, k, count, refindex, readindex;
	int fprimer_length, except_primer;
        unordered_map<string, int> umap;
	vector< pair<string, string>> sequences; 
	double exponent;

	int raw_sequence_count = 0;//debug purposes only

	fp_detail << "###############################################Sequence Anaysis Starts##############################################" << endl;
	
	distribution error_correction[reference.length()];
	for(i = 0; i < reference.length(); i++)
	{
	  	mutationStats.push_back(map<char,int>());
		mutationStats[i]['A'] = 0;
		mutationStats[i]['C'] = 0;
		mutationStats[i]['G'] = 0;
		mutationStats[i]['T'] = 0;
		mutationStats[i]['N'] = 0;
		for(k = 0; k < ILLUMINA_SCORE; k++)
			error_correction[i].qscore[k] = 0;

		error_correction[i].matching_base = 0;
		error_correction[i].mismatch = 0;
		error_correction[i].expected_mismatch = 0.0;
		error_correction[i].standard_deviation = 0.0;
	}
	

	condense_log << "Observed,Expected,zscore,correct,raw pval,A,C,G,T,N,%mutated" << endl;
	fprimer_length = meta_details.fprimer.length();
	except_primer = reference.length() - meta_details.fprimer.length();

	while(getline(fp_input, read_name))
        {
	  	raw_sequence_count++;
                getline(fp_input, cigar);
                getline(fp_input, alignment_string);
		getline(fp_input, quality_score);
		
		total_score = 0;
		for(i = 0; i < quality_score.length(); i++)
		{
			total_score += quality_score.at(i) - '!';
		}

		average_score = total_score / quality_score.length();
		fp_detail << endl << "Global Average Score = " << average_score << endl;

		if(average_score < GLOBAL_QSCORE)
			continue;
		
		refindex = fprimer_length;
		i = readindex = 0;
		alignment = "";
		quality = "";
		len = "";

		fp_detail << "CIGAR = " << cigar << endl;
		while(i < cigar.length())
		{
			if(cigar.at(i) >= '0' && cigar.at(i) <= '9')
				len += cigar.at(i);
			else
			{
				opt = cigar.at(i);
				count = atoi(len.c_str());
				fp_detail << "Count = " << count << ", and Option = " << opt << endl;
				len = "";			

				//fp_detail << "Reference = " << reference << endl;
				//fp_detail << "Alignment = " << alignment_string << endl;

				if(opt == 'M' || opt == 'X')
				{
					for(k = 0; k < count; k++)
					{
						if(alignment_string.at(readindex) == reference.at(refindex))
						{
							error_correction[refindex].matching_base += 1;
						}
						else 
						{
							error_correction[refindex].mismatch += 1;
							mutationStats[refindex][alignment_string.at(readindex)] += 1;
							fp_detail << "BASE Mismatch is Found at = " << refindex << endl;
						}

						error_correction[refindex].qscore[quality_score.at(readindex) - '!'] += 1;

						//fp_detail << k << "= (" << refindex << ", " << readindex << ") = " 
						//	"(" << reference.at(refindex) << ", " << alignment_string.at(readindex) << ")" << endl;

						refindex += 1;
						readindex += 1;
					}	
				}
				else if(opt == 'I')
				{
					readindex += count;
				}
				else if(opt == 'D')
				{
					refindex += count;
				}
				else
				{
					assert(false);
				}
			} 
			
			i += 1;
		}

	}

	fp_detail << endl;
	for(i = 0; i < reference.length(); i++)
	{
		fp_detail << "Showing Analysis for Index = " << i << endl;
		if(error_correction[i].mismatch + error_correction[i].matching_base == 0)
			continue;

		for(k = 0; k < ILLUMINA_SCORE; k++)
		{
			if(error_correction[i].qscore[k] == 0)
				continue;
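			// Phred quality k corresponds to a per-base error probability p = 10^(-k/10);
			// summing p over the observed bases gives the expected mismatch count, and
			// summing p*(1-p) accumulates the binomial variance of that count.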

			exponent = pow(10, -1 * k *  0.1);
			fp_detail << "QSCORE = " << k << ", and COUNT = " << error_correction[i].qscore[k] << ", and Exponent = " << exponent << endl;
			error_correction[i].expected_mismatch += (error_correction[i].qscore[k] * exponent);
			error_correction[i].standard_deviation += (error_correction[i].qscore[k] * exponent * (1 - exponent));
			
			fp_detail << "Expected Number of Mismatch = " << error_correction[i].expected_mismatch;
	                fp_detail << ", and Standard Deviation = " << error_correction[i].standard_deviation << endl;

		}
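		// Treat the mismatch count as approximately normal with mean expected_mismatch
		// and the variance accumulated above; the +0.5 below is a continuity correction,
		// and the one-sided p-value is 1 - Phi(zscore).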

		error_correction[i].standard_deviation = sqrt(error_correction[i].standard_deviation);
		error_correction[i].zscore = (error_correction[i].mismatch + 0.5 - 
						error_correction[i].expected_mismatch) / 
						error_correction[i].standard_deviation;
		
		error_correction[i].pvalue = 1-c_norm(error_correction[i].zscore);
		cout << "A pvalue is: " << error_correction[i].pvalue << endl;

		fp_detail << "Error Correction At Position = " << i - fprimer_length << endl;
		fp_detail << "Mismatch = " << error_correction[i].mismatch << ", and Matching BASE = " 
				<< error_correction[i].matching_base << endl;

		fp_detail << "Expected Number of Mismatch = " << error_correction[i].expected_mismatch;
		fp_detail << ", and Standard Deviation = " << error_correction[i].standard_deviation << endl;
		fp_detail << "Finally calculated ZSCORE = " << error_correction[i].zscore << endl;
		fp_detail << "Finally p-value is = " << error_correction[i].pvalue << endl;
		fp_detail << "Fail to reject null hypothesis is = " << (error_correction[i].reject_by_bh_threshold ? "0" : "1")<< endl;
		
		fp_detail << endl;

		//if(error_correction[i].mismatch > 0)
		//	assert(false);
	}
	set_reject_flags(error_correction, reference.length());

	for(i = 0; i < reference.length(); i++){
		if(error_correction[i].mismatch + error_correction[i].matching_base == 0)
		  continue;
		condense_log << error_correction[i].mismatch << "," << error_correction[i].expected_mismatch << "," << error_correction[i].zscore;
		condense_log << "," << (error_correction[i].reject_by_bh_threshold ? "0" : "1");
		condense_log << "," << (error_correction[i].pvalue) << ",";

		condense_log << mutationStats[i]['A'] << ",";
		condense_log << mutationStats[i]['C'] << ",";
		condense_log << mutationStats[i]['G'] << ",";
		condense_log << mutationStats[i]['T'] << ",";
		condense_log << mutationStats[i]['N'] << ",";
		condense_log << "," << double((double)error_correction[i].mismatch / (double)raw_sequence_count) << endl;
		
          if(error_correction[i].reject_by_bh_threshold){
	    fp_detail << "No Correcting at position " << i << endl;
	  } else {
	    fp_detail << "Correcting at position " << i << endl;

	  }
	}
	//condense_log.close();
	fp_input.clear();
	fp_input.seekg(0, fp_input.beg);

	while(getline(fp_input, read_name))
        {
                getline(fp_input, cigar);
                getline(fp_input, alignment_string);
		getline(fp_input, quality_score);
		
		total_score = 0;
		for(i = 0; i < quality_score.length(); i++)
		{
			total_score += quality_score.at(i) - '!';
		}

		average_score = total_score / quality_score.length();
		fp_detail << endl << "Global Average Score = " << average_score << endl;

		if(average_score < GLOBAL_QSCORE)
			continue;
		
		refindex = meta_details.fprimer.length();
		i = readindex = 0;
		alignment = "";
		quality = "";
		len = "";

		fp_detail << "CIGAR = " << cigar << endl;
		while(i < cigar.length())
		{
			if(cigar.at(i) >= '0' && cigar.at(i) <= '9')
				len += cigar.at(i);
			else
			{
				opt = cigar.at(i);
				count = atoi(len.c_str());
				fp_detail << "Count = " << count << ", and Option = " << opt << endl;
				len = "";			

				fp_detail << "Reference = " << reference << endl;
				fp_detail << "Alignment = " << alignment_string << endl;

				if(opt == 'M' || opt == 'X')
				{
					for(k = 0; k < count; k++)
					{
					  	/*if(error_correction[refindex].reject_by_bh_threshold){
						  cout << "Rejecting null hypothesis at " << refindex << endl;
						} else {
						  cout << "Accepting null hypothesis at " << refindex << endl;
						}*/
						if(alignment_string.at(readindex) != reference.at(refindex) &&
							//error_correction[refindex].zscore < ZSCORE &&
							!error_correction[refindex].reject_by_bh_threshold)
						{
							fp_detail << endl << "Error CORRECTION Point at " << readindex << endl;
							fp_detail << "ZSCORE at this point = " << error_correction[refindex].zscore << endl;
							fp_detail << "Changin " << alignment_string.at(readindex) << " To " <<
									reference.at(readindex) << endl;
						
							alignment += reference.at(refindex);
							quality += quality_score.at(readindex);

						}
						else
						{
						        fp_detail << endl << "No correction at " << refindex;
							alignment += alignment_string.at(readindex);
							quality += quality_score.at(readindex);
						}

						//fp_detail << k << "= (" << refindex << ", " << readindex << ")" << endl;

						refindex += 1;
						readindex += 1;
					}	
				}
				else if(opt == 'I')
				{
					for(k = 0; k < count; k++)
					{
						alignment += alignment_string.at(readindex);
						quality += quality_score.at(readindex);

						readindex += 1;
					}
				}
				else if(opt == 'D')
				{
					refindex += count;
				}
				else
				{
					assert(false);
				}
			} 
			
			i += 1;
		}

		detail = cigar + "\n" + alignment;
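		// 'detail' embeds a newline between the CIGAR and the corrected alignment, so the
		// FASTA writer below emits the CIGAR on the header line and the alignment on its
		// own line, while identical reads collapse into a single entry with a DUPCOUNT.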
		if(umap.find(detail) == umap.end())
                {
                        //umap[alignment] = 1;
                	sequences.push_back(make_pair(read_name, detail));
			umap[detail] = 1;
                }
                else
                {
                        umap[detail] += 1;
                }

		/*
                fp_fasta << ">" << read_name;

                fp_fasta << "|DUPCOUNT=1";
                fp_fasta << "|CIGAR=" << cigar << endl;
                fp_fasta << alignment << endl;
		*/
        }



	for(i = 0; i < sequences.size(); i++)
	{
	        fp_fasta << ">" << sequences[i].first;

                fp_fasta << "|DUPCOUNT=" << umap[sequences[i].second];
                fp_fasta << "|CIGAR=" << sequences[i].second << endl;
                //fp_fasta << alignment << endl;

	}

        fp_input.close();
        fp_fasta.close();
	condense_log.close();

	return;
}
Example #2
void set_file_length(ifstream &infile, int &file_size)
{
    infile.seekg (0, ios::end);
    file_size = infile.tellg();
    infile.seekg (0, ios::beg);
}
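
// Minimal usage sketch for set_file_length ("data.bin" is a hypothetical name; note
// that an int holds the size, so files larger than INT_MAX bytes would overflow it):
//
//   ifstream in("data.bin", ios::binary);
//   int file_size = 0;
//   if (in.is_open())
//       set_file_length(in, file_size);
//   cout << "size = " << file_size << " bytes" << endl;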
Example #3
#ifdef DEBUG_DICOM
void DICOMParser::ParseUndefLengthSequence(ifstream& fileDICOM, short& iSeqGroupID, short& iSeqElementID, DICOMFileInfo& info, const bool bImplicit, const bool bNeedsEndianConversion, UINT32 iDepth) {
  for (int i = 0;i<int(iDepth)-1;i++) Console::printf("  ");
  Console::printf("iGroupID=%x iElementID=%x elementType=SEQUENCE (undef length)\n", iSeqGroupID, iSeqElementID);
#else
void DICOMParser::ParseUndefLengthSequence(ifstream& fileDICOM, short& , short& , DICOMFileInfo& info, const bool bImplicit, const bool bNeedsEndianConversion) {
#endif
  int iItemCount = 0;
  UINT32 iData;

  string value;
  short iGroupID, iElementID;
  DICOM_eType elementType;

  do {
    fileDICOM.read((char*)&iData,4);
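    // DICOM delimiter tags, read here as little-endian 32-bit values:
    // (FFFE,E000) item start -> 0xE000FFFE, (FFFE,E00D) item delimitation -> 0xE00DFFFE,
    // (FFFE,E0DD) sequence delimitation -> 0xE0DDFFFE.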

    if (iData == 0xE000FFFE) {
      iItemCount++;
      fileDICOM.read((char*)&iData,4);
      #ifdef DEBUG_DICOM
        for (UINT32 i = 0;i<iDepth;i++) Console::printf("  ");
        Console::printf("START ITEM\n");
      #endif
    } else if (iData == 0xE00DFFFE) {
      iItemCount--;
      fileDICOM.read((char*)&iData,4);
      #ifdef DEBUG_DICOM
        for (UINT32 i = 0;i<iDepth;i++) Console::printf("  ");
        Console::printf("END ITEM\n");
      #endif
    } else if (iData != 0xE0DDFFFE) fileDICOM.seekg(-4, ios_base::cur);


    if (iItemCount > 0) {
      ReadHeaderElemStart(fileDICOM, iGroupID, iElementID, elementType, iData, bImplicit, bNeedsEndianConversion);

      if (elementType == TYPE_SQ) {
        fileDICOM.read((char*)&iData,4);
        if (iData == 0xFFFFFFFF) {
          #ifdef DEBUG_DICOM
          ParseUndefLengthSequence(fileDICOM, iGroupID, iElementID, info, bImplicit, bNeedsEndianConversion, 1);
          #else
          ParseUndefLengthSequence(fileDICOM, iGroupID, iElementID, info, bImplicit, bNeedsEndianConversion);
          #endif
        } else {
          // HACK: here we simply skip over the entire sequence
          value.resize(iData);
          fileDICOM.read(&value[0],iData);
          value = "SKIPPED EXPLICIT SEQUENCE";
        }
      } else {

        if (iData == 0xFFFFFFFF) {
          #ifdef DEBUG_DICOM
            ParseUndefLengthSequence(fileDICOM, iGroupID, iElementID, info, bImplicit, bNeedsEndianConversion, iDepth+1);
          #else
            ParseUndefLengthSequence(fileDICOM, iGroupID, iElementID, info, bImplicit, bNeedsEndianConversion);
          #endif
        } else {
          // In a debug build, crash and burn so we know where the error
          // occurred.
          // In release, nudge up the data value so we're guaranteed to make
          // progress; otherwise the application can hang in an infinite loop
          // of reading 0's.
          assert(iData > 0);
          if(iData == 0) { iData = 1; }

          value.resize(iData);
          fileDICOM.read(&value[0],iData);
          #ifdef DEBUG_DICOM
            for (UINT32 i = 0;i<iDepth;i++) Console::printf("  ");
            Console::printf("iGroupID=%x iElementID=%x elementType=%s value=%s\n", iGroupID, iElementID, DICOM_TypeStrings[int(elementType)].c_str(), value.c_str());
          #endif
        }
      }
    }

  } while (iData != 0xE0DDFFFE && !fileDICOM.eof());
  fileDICOM.read((char*)&iData,4);

#ifdef DEBUG_DICOM
  for (UINT32 i = 0;i<iDepth;i++) Console::printf("  ");
  Console::printf("END SEQUENCE\n");
#endif

}
Example #4
/////////////////////////////////////////// Global processing functions
// The object model type of mesh
void readObjectModel(ifstream &in, BinHeadType &thdr) {
	BinHeader hdr;
	BinHeader2 hdr2;

	log_info("LGMD mesh processing - e.g. an object");
	long size = 0;

	log_info("Header version : %d", thdr.version);

	// determine the header size depending on the version of the mesh
	switch ((int) thdr.version) {
		case 3: size = SIZE_BIN_HDR_V3; break;
		case 4: size = SIZE_BIN_HDR_V4; break;
		case 6: size = SIZE_BIN_HDR_V6; break;
		default: size = -1; // TODO: fixfix
	}

	if (size <= 0) {
		log_fatal("FATAL: The object mesh has an unsupported version : %d", thdr.version);
		return;
	}

	// erase the header
	memset((char *) &hdr, 0, sizeof(BinHeader));

	// read the model header
	in.read((char *) &hdr, size);

	// we should definitely complete the header data depending on the version somehow...

	if (thdr.version == 6) { // we have another header to look at
		log_debug("V6 header");
		in.seekg(hdr.offset_hdr2, ios::beg);
		in.read((char *) &hdr2, sizeof(BinHeader2));

		// is this a right approach? I hope so
		hdr.offset_uv = hdr2.offset_uv;
		hdr.offset_verts = hdr2.offset_verts;
		hdr.offset_norms = hdr2.offset_norms;
		num_uvs = hdr2.num_uvs;
	} else {
		// It seems that pre-6 version headers do not contain number of uvs
		num_uvs = (hdr.offset_vhots - hdr.offset_uv) / sizeof (UVMap);

		log_debug("pre - V6 header, calculated num_uvs = %d", num_uvs);
	}

	log_info("Reading Tables:");

	log_info(" * Materials (%d)", hdr.num_mats);
	log_debug("  - offset %06lX",hdr.offset_mats);

	// Materials
	materials = new MeshMaterial[hdr.num_mats];

	// this is not good! we read right through other tables...
	in.seekg(hdr.offset_mats, ios::beg);

	in.read((char *) materials, hdr.num_mats * sizeof(MeshMaterial));

	for (int x = 0; x < hdr.num_mats; x++) {
		// logging the material names:
		log_debug("       - material name : %s (type: %04X, slot: %d)", materials[x].name, materials[x].type, materials[x].slot_num);

		// computing the maximal slot number
		if (materials[x].slot_num > maxslotnum)
			maxslotnum = materials[x].slot_num;

		// initialise the outputter
		SingleMaterialMesh *ins = new SingleMaterialMesh(fileBaseName, x, (materials[x].type == MD_MAT_TMAP));

		if (ins == NULL)
			log_error("Material %d failed to construct", x);

		outputters.push_back(ins);
	}

	// slot to index material conversion table preparation
	if (thdr.version == 3) { // it seems only v3 meshes use slots for materials
		slot2matnum = new short[maxslotnum + 1];
		for (int x = 0; x <= maxslotnum; x++)
			slot2matnum[x] = -1;

		for (int x = 0; x < hdr.num_mats; x++) {
			int slot = materials[x].slot_num;
			log_debug("Adding slot %d (-> %d) to slot2matnum", slot, x);
			slot2matnum[slot] = x;
		}
	}

	in.seekg(hdr.offset_mat_extra, ios::beg);

	// if we need extended attributes
	if ( hdr.mat_flags & MD_MAT_TRANS || hdr.mat_flags & MD_MAT_ILLUM ) {
		log_info(" * Extra materials (%d)", maxslotnum);
		log_info(" * Extra material record size %d", hdr.size_mat_extra);
		log_debug("  - actual offset %06lX", (int) in.tellg());
		// Extra Materials
		materialsExtra = new MeshMaterialExtra[hdr.num_mats];
		
		
		int extrasize = hdr.size_mat_extra - 8;
		if (extrasize < 0) {
			log_error(" * Extra size less than 8!");
			extrasize = 0;
		}
		
		for (int i = 0; i < hdr.num_mats; ++i) {
			in.read((char*)&materialsExtra[i], sizeof(MeshMaterialExtra));
			in.seekg(extrasize, ios::cur);
			log_debug("   - Mat. %d trans %f illum %f", i, materialsExtra[i].trans, materialsExtra[i].illum);
		}
	}

	// Read the UV map vectors
	log_info(" * UVMAP (%d / %d)", hdr.num_verts, num_uvs);
	log_debug("  - offset %06lX",hdr.offset_uv);
	// prepare and read uvmaps
	uvs = new UVMap[num_uvs];

	in.seekg(hdr.offset_uv, ios::beg);
	// I have to rely on shadowspawn here: read uvs in the number of verts - those are differing, but I guess that this is solvable later on
	// After looking into the binary, the num_uvs calculated seem to be reasonable
	// in.read((char *) uvs, hdr.num_verts * sizeof(UVMap));
	in.read((char *) uvs, num_uvs * sizeof(UVMap));

	// TODO: shadowspawn reverses the U part of pre-6 version mesh UV table here. See if we need that too


	// VHOT:
	log_info(" * VHOT (%d)", hdr.num_vhots);
	log_debug("  - offset %06lX",hdr.offset_vhots);

	if (hdr.num_vhots > 0) {
		// prepare and read the vhots
		vhots = new VHotObj[hdr.num_vhots];
		in.seekg(hdr.offset_vhots, ios::beg);
		in.read((char *) vhots, hdr.num_vhots * sizeof(VHotObj));
	} else {
		vhots = NULL;
		log_info("No vhots in this model");
	}
	// Vertex table....
	log_info(" * Vertices (%d)", hdr.num_verts);
	log_debug("  - offset %06lX",hdr.offset_verts);

	// prepare and read the vertices
	vertices = new Vertex[hdr.num_verts];
	in.seekg(hdr.offset_verts, ios::beg);
	in.read((char *) vertices, hdr.num_verts * sizeof(Vertex));

	log_debug("Setting important data structures in outputters...");

	log_info("Object tree processing:");

	SaveMaterialFile(fileBaseName, "materials/", hdr);
	ProcessObjects(in, thdr, hdr, hdr2);

	for (int x = 0; x < hdr.num_mats; x++) {
		outputters[x]->setObjects(objects, hdr.num_objs);
		outputters[x]->setVertices(vertices, hdr.num_verts);
		outputters[x]->setUVMaps(uvs, num_uvs);
	}

	// final output
	char filepath[2048];

	snprintf(filepath, sizeof(filepath), "%s%s.xml", meshOutPath, fileBaseName); // TODO: Path!

	ofstream ofs(filepath);

	ofs << "<mesh>" << endl << "\t<submeshes>" << endl;

	for (int x = 0; x < hdr.num_mats; x++)
		outputters[x]->output(ofs, "\t\t");


	ofs << "\t</submeshes>" << endl << "</mesh>" << endl;

	ofs.close();

	// cleanout
	log_info("Releasing used pointers");
	log_debug(" * outputters");
	for (int x = 0; x < hdr.num_mats; x++)
		if (outputters[x] != NULL) {
			log_debug("   - %d", x);
			//SingleMaterialMesh *m = outputters[x];
			//delete m; // TODO: Why oh why is this causing segfaults?
		}

	log_debug(" * vhot");
	if (vhots != NULL)
		delete[] vhots;

	log_debug(" * vert");
	if (vertices != NULL)
		delete[] vertices;

	log_debug(" * vert");
	if (uvs != NULL)
		delete[] uvs;

	log_debug(" * materials");
	if (materials != NULL)
		delete[] materials;

	log_debug(" * extras");
	if (materialsExtra != NULL)
		delete[] materialsExtra;

	log_debug(" * objects");
	if (objects != NULL)
		delete[] objects;

	log_debug(" * slot2matnum");

	if (slot2matnum != NULL)
		delete[] slot2matnum;

	// the end
	log_info("all done");
}
Example #5
int Towers_lg(int disks, int count, int arr2[], int arr3[], bool &ppart, ifstream &infile, ofstream & outfile, ofstream &fout)
{
	int even = count%2;
	int totres;
	int split;
   	int split_res;
   	int remaining;
 	int remaining_res;
 	int result;
 	int smallest_so_far;
 	int disks1;

	//Setting in reading stream cursor back to the beginning
	infile.clear();
	infile.seekg(0);


	for(disks1 = 1; disks1 < count; disks1++)
 	{
 		arr2[disks1 - 1] = 2*(disks1) - 1;
 		arr3[disks1 - 1] = 2*(disks1) - 1;
 	}

	//If the number of disks is less than the number of pegs, then calculate based on 2n-1
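	//e.g. 3 disks on 4 pegs: park the two smaller disks on their own spare pegs,
	//move the largest, restack them -> 2*3 - 1 = 5 moves.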
	if(disks < count)
	{
		cout << "The array is: ";
		outfile << "The array is: ";
		if(even == 0)
		{
			for (int g = 0; g < disks; g++)
			{
				cout << arr2[g] << " ";
				outfile << arr2[g] << " ";
				fout << arr2[g] << " ";
			}
			cout << endl;
			outfile << endl;
			//Setting the writing stream cursor back to the beginning
			fout.clear();
			fout.seekp(0);
			//Making sure the reading stream is at the beginning of the file
			infile.clear();
			infile.seekg(0);

			ppart = true;
			return totres = arr2[disks - 1];
		}
		else
		{
			cout << "The array is: ";
			outfile << "The array is: ";
			for (int r = 0; r < disks; r++)
			{
				cout << arr3[r] << " ";
				outfile << arr3[r] << " ";
				fout << arr3[r] << " ";
			}
			cout << endl;
			outfile << endl;

			//Setting the writing stream cursor back to the beginning
			fout.clear();
			fout.seekp(0);

			//Making sure the reading stream is at the beginning of the file
			infile.clear();
			infile.seekg(0);

			ppart = true;
			return totres = arr3[disks - 1];
		}
	}
	else
	{
		while(!infile.eof())
		{
			for(int z = 0; z < disks; z++)
			{
				infile >> arr2[z];
			}
		}
		//Resetting the reading cursor back to the beginning
		infile.clear();
		infile.seekg(0);

		//Setting the first position in the next array
		arr3[0] = 1;
		
		for(int j = 4; j <= count; j++)
		{
			//Resetting "smallest_so_far"
			smallest_so_far = INT_MAX;

			for(split = 1; split < disks1; split++)
			{
				remaining = disks1 - split;
				split_res = arr3[split - 1];

				remaining_res = arr3[remaining - 1];
				result = 2*(split_res) + remaining_res;

				//Only update "smallest_so_far" if "result" is lower
				if(result <= smallest_so_far)
					smallest_so_far = result;
			}
			//Setting the used array to "smallest_so_far"
			arr2[j - 1] = smallest_so_far;
		}
	
		cout << "The array is: ";
		outfile << "The array is: ";
		for (int g = 0; g < disks; g++)
		{
			cout << arr2[g] << " ";
			outfile << arr2[g] << " ";
			fout << arr2[g] << " ";
		}
		cout << endl;
		outfile << endl;

		//Setting the writing stream cursor back to the beginning
		fout.clear();
		fout.seekp(0);

		//Making sure the reading stream is at the beginning of the file
		infile.clear();
		infile.seekg(0);
		
		ppart = false;
	}
	return totres = arr3[disks - 1];
}
Example #6
void loadPolygon(ifstream &in, BinHeadType &thdr, BinHeader &hdr, BinHeader2 &hdr2, int objidx, SubObjectHeader &shdr, long offset) {
	log_verbose("\t\t\t\tloading polygon on offset %04X : ", hdr.offset_pgons + offset);
	int oldpos = in.tellg();

	ObjPolygon polyHdr;

	in.seekg(hdr.offset_pgons + offset, ios::beg);
	in.read((char *) &polyHdr, sizeof(ObjPolygon));


	short *vertex_indices;
	short *normal_indices; // ??
	short *uv_indices = NULL;

	vertex_indices = new short[polyHdr.num_verts];
	normal_indices = new short[polyHdr.num_verts];
	in.read((char *) vertex_indices, sizeof(short) * polyHdr.num_verts);
	in.read((char *) normal_indices, sizeof(short) * polyHdr.num_verts);

	if ( polyHdr.type == MD_PGON_TMAP ) {
		log_verbose("\t\t\t\tTextured, reading indices");
		uv_indices = new short[polyHdr.num_verts];
		in.read((char *) uv_indices, sizeof(short) * polyHdr.num_verts);
	}

	uint8_t Material;

	if ( thdr.version == 4 )
		in.read((char*)&Material, 1);
	else {
		int mat = SlotToMatIndex(polyHdr.data); // Material here uses a slot... we'll see if the v4 does that too (so no -1)
		log_verbose("\t\t\t\tMaterial, type, textured : %d %02X %d", mat, polyHdr.type, (polyHdr.type == MD_PGON_TMAP));

		if ((mat < 0) || (mat >= hdr.num_mats)) {
			log_error("\t\t\t\tInvalid material number %d, not adding the polygon (slot %d)", mat, polyHdr.data);
			return;
		}

		Material = mat;
	}

	// now let's triangularize
	// we always have N-2 triangles per polygon (3 -> 1, 4 -> 2... etc)
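	// Fan triangulation: every emitted triangle keeps the polygon's first vertex and
	// walks a consecutive edge, i.e. (v0, v[i+1], v[i]), preserving the face winding.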
	for (int i = 0; i < polyHdr.num_verts - 1; i++) {
		// depending on texturization
		if ( polyHdr.type == MD_PGON_TMAP )
			addTriangle(hdr, shdr, objidx, Material,
				vertex_indices[0],
				vertex_indices[i+1],
				vertex_indices[i],
				uv_indices[0],
				uv_indices[i+1],
				uv_indices[i]);
		else {
			if (materials[static_cast<size_t>(Material)].type == MD_MAT_TMAP) {
				log_error("Material needs UV and none found, using vertex index for uv");
				addTriangle(hdr, shdr, objidx, Material,
					vertex_indices[0],
					vertex_indices[i+1],
					vertex_indices[i],
					vertex_indices[0],
					vertex_indices[i+1],
					vertex_indices[i]);
			} else
				addTriangle(hdr, shdr, objidx, Material,
					vertex_indices[0],
					vertex_indices[i+1],
					vertex_indices[i]);
		}
	}

	// release the structures
	delete[] vertex_indices;
	delete[] normal_indices;
	if (uv_indices != NULL)
		delete[] uv_indices;

	// return to the old position
	in.seekg(oldpos, ios::beg);
}
Example #7
void parseSubNode(ifstream &in, BinHeadType &thdr, BinHeader &hdr, BinHeader2 &hdr2, int objidx, SubObjectHeader &shdr, long offset) {
	char splittype;
	short polys[1024];
	int polycount;

	log_debug("\t\t\tSub-node %d on offset %04lX", offset, hdr.offset_nodes + offset);
	in.seekg(hdr.offset_nodes + offset, ios::beg);
	in.read(&splittype, 1);

	NodeRaw     nr;
	NodeCall    nc;
	NodeSplit   ns;

	switch (splittype) {
		case MD_NODE_HDR:
				NodeHeader ndhdr;
				in.read((char *) &ndhdr, sizeof(NodeHeader));
				log_debug("\t\t\t-- Header node --");
				log_debug("\t\t\t * Node values: [flag, objnum, unk1]: %d %d %d", (int)ndhdr.flag, (int)ndhdr.object_number, (int)ndhdr.c_unk1);

				parseSubNode (in, thdr, hdr, hdr2, objidx, shdr, offset + sizeof(NodeHeader));
			break;
		case MD_NODE_SPLIT:
				log_debug("\t\t\t-- Split node --");
				in.read((char *) &ns, sizeof(NodeSplit));
				// the polygons are read sequentially, and processed

				polycount = ns.pgon_before_count + ns.pgon_after_count;
				in.read((char *) polys, sizeof(short) * polycount);

				log_debug("\t\t\tChilds : %d - %d", ns.behind_node, ns.front_node);

				log_verbose("\t\t\tPolygon count: %d", polycount);

				if (ns.behind_node < offset)
					log_error("\t\t\tSplit to a lower node number - behind node!");
				else
				if ( ( ns.behind_node >= shdr.node_start ) ) { // todo:  && ( ns.behind_node < NodeMax )
					parseSubNode (in, thdr, hdr, hdr2, objidx, shdr, ns.behind_node);
				}

				if (ns.front_node < offset)
					log_error("\t\t\tSplit to a lower node number - front node!");
				else
				if ( ( ns.front_node >= shdr.node_start ) ) { // todo: && ( ns.front_node < NodeMax )
					parseSubNode (in, thdr, hdr, hdr2, objidx, shdr, ns.front_node);
				}

				for (int n = 0; n < polycount; n++)
					loadPolygon(in, thdr, hdr, hdr2, objidx, shdr, polys[n]);

				break;

		case MD_NODE_CALL:
				log_debug("\t\t\t-- Call node --");
				in.read((char *) &nc, sizeof(NodeCall));
				// the polygons are read sequentially, and processed

				polycount = nc.pgon_before_count + nc.pgon_after_count;
				log_verbose("\t\t\tPolygon count: %d", polycount);

				in.read((char *) polys, sizeof(short) * polycount);
				for (int n = 0; n < polycount; n++)
					loadPolygon(in, thdr, hdr, hdr2, objidx, shdr, polys[n]);

			break;

		case MD_NODE_RAW:
				log_debug("\t\t\t-- Raw node --");
				in.read((char *) &nr, sizeof(NodeRaw));
				// the polygons are read sequentially, and processed

				in.read((char *) polys, sizeof(short) * (nr.pgon_count));
				log_verbose("\t\t\tPolygon count: %d", nr.pgon_count);

				for (int n = 0; n < nr.pgon_count; n++)
					loadPolygon(in, thdr, hdr, hdr2, objidx, shdr, polys[n]);

			break;

		default: log_error("Unknown node type %d at offset %04lX", splittype,  hdr.offset_nodes + offset);
	}
}
Example #8
void SpinAdapted::OneElectronArray::ReadFromDumpFile(ifstream& dumpFile, int norbs) {
  pout << "OneElectronArray::ReadFromDumpFile is deprecated" << endl;
  if (bin) {
    dumpFile.seekg (0, ios::end);
    //int size = dumpFile.tellg();
    double size = dumpFile.tellg();
    dumpFile.seekg (0, ios::beg);
    FORTINT nmo = rhf ? static_cast<int>(2*sqrt(size / (sizeof(double)))) : static_cast<int>(sqrt(size / (sizeof(double))));
    ReSize(nmo);
    if (rhf) nmo /= 2;
    char buffer[nmo*nmo*sizeof(double)] ;
    dumpFile.read(buffer, nmo*nmo*sizeof(double));
    Matrix Aoints, Moints;
    Aoints.ReSize(nmo,nmo);
    Moints.ReSize(nmo, nmo);
    Aoints = 0;
    for(int i=0;i<nmo;++i)
      for(int j=0; j<nmo; ++j)
	{
	  int a=i,b=j;
	  Aoints(a+1,b+1) = ((double*)(&buffer[(i*nmo +j)*sizeof(double)]))[0];
	}
    
    //the above are the ao integrals...need mo integrals
    //first read the mo coefficients
    ifstream moCoeff;
    moCoeff.open("42.0", ios::binary);
    
    if (rhf) {
      Matrix CoeffMatrix;
      CoeffMatrix.ReSize(nmo, nmo); 
      char coeffchars[nmo*nmo*sizeof(double)];
      moCoeff.read(coeffchars, nmo*nmo*sizeof(double));
      double* coeffs = ((double*)(coeffchars));
      
      for (int i=0; i<nmo; i++)
	for (int j=0; j<nmo; j++) {
	  CoeffMatrix(i+1,j+1) = coeffs[i*nmo+j];
	}
      
      moCoeff.read(coeffchars, nmo*sizeof(double));
      double* occnums = ((double*)(coeffchars));
      Occnum.resize(2*nmo);
      for (int i=0; i<nmo; i++) {
	Occnum.at(2*i) = occnums[i];
	Occnum.at(2*i+1) = occnums[i];
      }
      
      double scale=1.0, cfactor=0.0;
      double* inter = new double[nmo*nmo];
      char n='n', t='t';
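      // The two BLAS calls below perform the AO -> MO transform M = C^T * A * C:
      // inter = A * C, then Moints = C^T * inter (modulo the column-major storage
      // convention that dgemm_ assumes).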
      dgemm_ (&n, &n, &nmo, &nmo, &nmo, &scale, Aoints.Store(), &nmo, CoeffMatrix.Store (), &nmo, &cfactor, inter, &nmo);
      dgemm_ (&t, &n, &nmo, &nmo, &nmo, &scale, CoeffMatrix.Store (), &nmo, inter, &nmo, &cfactor, Moints.Store (), &nmo);
      delete [] inter;
      
      
      for(int i=0;i<nmo;++i)
	for(int j=0; j<nmo; ++j)
	  {
	    int a=i,b=j;
	    if (rhf)
	      {
		a*=2;
		b*=2;
	      }
	    (*this)(a,b) = Moints(a/2+1, b/2+1);
	  }
      
    }
  }
  else {
    int n = 0;
    string msg; int msgsize = 5000;
    Input::ReadMeaningfulLine(dumpFile, msg, msgsize);
    vector<string> tok;
    boost::split(tok, msg, is_any_of(" \t"), token_compress_on);
    if (tok.size() != 1) {
      perr << "The first line of one electron integral file should be number of orbitals"<<endl;
      perr << "Error at line :"<<msg<<endl;
      abort();
    }
    if (atoi(tok[0].c_str()) != norbs) {
      perr << "Number of orbitals in one electron integral file should be equal to one given in input file"<<endl;
      perr << "# orbs in input file : "<<norbs<<endl;
      perr << "# orbs in one electron integral file : "<<atoi(tok[0].c_str())/2<<endl;
      abort();
    }
    n = norbs;
    
    if (rhf)
      {
	n=2*n;
      }
    
    ReSize(n);
    int i, j;
    
    Input::ReadMeaningfulLine(dumpFile, msg, msgsize);
    while (msg.size() != 0)
      {
	boost::split(tok, msg, is_any_of(" \t"), token_compress_on);
	if (tok.size() != 3) {
	  perr<< "The format of one electron integral file incorrect"<<endl;
	  perr <<"error at this line: "<<msg<<endl;
	  abort();
	}
	i = atoi(tok[0].c_str());
	j = atoi(tok[1].c_str());
	if (i >= n || j >= n) {
	  perr << "index of orbitals in one electron integral file cannot be bigger than "<<n<<endl;
	  perr<< "error at this line: "<<msg<<endl;
	  abort();
	}
	if (rhf)
	  {
	    i=2*i;
	    j=2*j;
	  }
	(*this)(i, j) = atof(tok[2].c_str());
	
	msg.resize(0);
	Input::ReadMeaningfulLine(dumpFile, msg, msgsize);
      }
  }
}
Example #9
int main(int argc, char** argv)
{
    if (argc < 3)
    {
        cout << "Not enough arguments" << endl;
        return 1;
    }

    tested_cpg.open(argv[1]);
    true_cpg.open(argv[2]);

    if (!tested_cpg.good())
    {
        cout << "Bad tested cpg file" << endl;
        return 2;
    }
    if (!true_cpg.good())
    {
        cout << "Bad true cpg file" << endl;
        return 2;
    }

    if (argc > 3 && !strcmp(argv[3], "--hits"))
    {
        Count_Hits();
    }

    tested_cpg.clear();
    true_cpg.clear();
    tested_cpg.seekg(0, tested_cpg.beg);
    true_cpg.seekg(0, true_cpg.beg);

    uint total_cpg_length = 0;
    uint total_tested_length = 0;

    string tested_line;
    string true_line;
    uint tested_start_interval = 0;
    uint tested_end_interval = 0;
    uint prev_tested_end = 0;
    uint true_start_interval = 0;
    uint true_end_interval = 0;
    uint tested_not_in_true = 0;
    uint common = 0;
    uint true_not_in_tested = 0;
    vector<string> tested_line_tokens;
    vector<string> true_line_tokens;
    bool update_true_intervals = true;
    bool update_test_intervals = true;
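    // Sweep both interval lists in parallel (this assumes each file is sorted by start
    // coordinate); the cases below classify every base as common, tested-only, or
    // true-only and accumulate the corresponding counters.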

    while (tested_cpg || true_cpg)
    {
        if (update_test_intervals)
        {
            prev_tested_end = tested_end_interval;
            update_test_intervals = false;
            getline(tested_cpg, tested_line);
            Split(tested_line, '\t', tested_line_tokens);
            tested_start_interval = atoi(tested_line_tokens[1].c_str());
            tested_end_interval = atoi(tested_line_tokens[2].c_str());

            if (!tested_line.empty())
            {
                total_tested_length += tested_end_interval - tested_start_interval + 1;
            }
        }

        if (update_true_intervals)
        {
            update_true_intervals = false;
            getline(true_cpg, true_line);
            Split(true_line, '\t', true_line_tokens);
            true_start_interval = atoi(true_line_tokens[1].c_str());
            true_end_interval = atoi(true_line_tokens[2].c_str());

            if (!true_line.empty())
            {
                total_cpg_length += true_end_interval - true_start_interval + 1;
            }
        }

        if (true_line.empty() && tested_line.empty())
        {
            break;
        }

        if (true_line.empty())
        {
            tested_not_in_true += tested_end_interval - tested_start_interval + 1;
            update_test_intervals = true;
            continue;
        }

        if (tested_line.empty())
        {
            true_not_in_tested += true_end_interval - true_start_interval + 1;
            update_true_intervals = true;
            continue;
        }

        //  ####  ####
        // #############
        if (tested_start_interval <= true_end_interval && prev_tested_end > true_start_interval)
        {
            true_not_in_tested += tested_start_interval - prev_tested_end - 1;
        }

        //   ####      #####
        // ##########
        if (tested_start_interval > true_end_interval && prev_tested_end < true_end_interval && prev_tested_end >= true_start_interval)
        {
            true_not_in_tested += true_end_interval - prev_tested_end;
        }

        //  #####               #######
        //         ########
        if (tested_start_interval > true_end_interval && prev_tested_end < true_start_interval)
        {
            true_not_in_tested += true_end_interval - true_start_interval + 1;
            update_true_intervals = true;
        }

        // ###   ###
        //     #########
        if (prev_tested_end < true_start_interval && tested_start_interval > true_start_interval && tested_start_interval < true_end_interval)
        {
            true_not_in_tested += tested_start_interval - true_start_interval;
        }

        //tested_not_in_true

        // #####
        //         #######
        if (tested_end_interval < true_start_interval)
        {
            tested_not_in_true += tested_end_interval - tested_start_interval + 1;
            update_test_intervals = true;
        }

        // #####
        //    #######
        if (tested_end_interval >= true_start_interval && tested_start_interval < true_start_interval && tested_end_interval <= true_end_interval)
        {
            tested_not_in_true += true_start_interval - tested_start_interval;
            common += tested_end_interval - true_start_interval + 1;
            update_test_intervals = true;
        }

        // ##########
        //    ###
        if (tested_start_interval < true_start_interval && tested_end_interval > true_end_interval)
        {
            tested_not_in_true += true_start_interval - tested_start_interval;
            common += true_end_interval - true_start_interval + 1;
            tested_start_interval = true_end_interval + 1;
            update_true_intervals = true;
            continue;
        }

        //     ###
        // #########
        if (tested_end_interval <= true_end_interval && tested_start_interval >= true_start_interval)
        {
            common += tested_end_interval - tested_start_interval + 1;
            update_test_intervals = true;
        }

        //           #######
        // ######
        if (tested_start_interval > true_end_interval)
        {
            update_true_intervals = true;
        }

        //    ######
        // ######
        if (tested_end_interval > true_end_interval && tested_start_interval <= true_end_interval)
        {
            common += true_end_interval - tested_start_interval + 1;
            update_true_intervals = true;
            tested_start_interval = true_end_interval + 1;
        }
    }

    cout << "Total true length: " << total_cpg_length << endl;
    cout << "Total tested length: " << total_tested_length << endl;
    cout << "Total tested not in true: " << tested_not_in_true << " (" << setprecision(2) << tested_not_in_true/(double)total_tested_length*100 << "%)" << endl;
    cout << "Total common1: " << common << " (" << setprecision(2) << common/(double)total_tested_length*100 << "%)" << endl;
    cout << "Total common2: " << common << " (" << setprecision(2) << common/(double)total_cpg_length*100 << "%)" << endl;
    cout << "Total true not in tested: " << true_not_in_tested << " (" << setprecision(2) << true_not_in_tested/(double)total_cpg_length*100 << "%)" << endl;

    return 0;
}
Example #10
 string find_header_element (string element, bool is_path) {
     /* method to find a specific element from the simfile and return it
     
     Arguments:
     element: the element name we are searching for (tagname)
     is_path: boolean to trigger the function to use the code to read paths properly
     
     Notes: this method reads the next element from the filestream as the element we
     are searching for after identifying the tagname so the elements need to be separated
     by a whitespace. Returns a string representation of the value, which needs to be
     converted above.
     */
     
     string value;               // value to return
     int failure_counter;        // failure counter (in case no match found)
     int max_failures = 2000;    // maximum number of failures allowed
     string text_read;           // the string read in from the stream
     
     cfile.seekg(0);             // rewind to beginning
     failure_counter = 0;
     do {
         cfile >> text_read;
         failure_counter++;
         if (failure_counter == max_failures) {
             cout << "ERROR: simfile file read error, could not find " << element << endl;
             exit(2);
         }
     }
     while (text_read != element);
     
     /* Notes: here we need to use different read methods for Linux and Windows
     The read method for Windows allows spaces in the filenames (which is necessary
     for the Rscript path. Although it shouldn't be a problem for other elements
     as the user shouldn't have spaces in the pathnames (I hope they read the user
     guide that warns them not to use spaces in pathnames!). In Linux, you can just
     call Rscript with the name 'Rscript' and you don't need the full path.
     
     Alternately, and probably recommended is to add Rscript to your system path
     variable on windows.
     
     I have no idea how any of this works on Mac computers, sorry - so please email me
     if you are compiling on Mac computers.
     */
     
     #ifdef __MINGW32__
     if (is_path) {
         // get the rest of the line if we are searching for a path
         getline (cfile, value);
         
         // keep trimming spaces off the front of the string
         while (value.substr(0, 1) == " ") {
             value = value.substr(1, value.npos);
             if (value.empty()) {
                 break;                  // break if there is no string present
             }
         }
     } else {
         cfile >> value;                 // assign next read to the value and return it
     }
     #endif
     
     #ifdef __linux__
     cfile >> value;
     #endif
     
     if (verbose) {
         cout << element << ": " << value << endl;
     }
     return (value);
 }
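
// Minimal usage sketch for find_header_element (assumes cfile is the already-open
// simfile stream and verbose is defined elsewhere; the tag names are hypothetical):
//
//   int n_runs     = atoi(find_header_element("n_runs", false).c_str());
//   string r_path  = find_header_element("rscript_path", true);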
unsigned long	readExplicitFile( ifstream& input, unsigned long offset , bool bigEndian , unsigned short target , DicomFileImage& fileInfo )
{
	unsigned long	currentOffset;
	unsigned long	size;
	unsigned short	group;
	unsigned short	element;
	char			type[3];
	unsigned short	lengthShort;
	char*			value;
	bool			found;
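	// Explicit VR elements are laid out as: 2-byte group, 2-byte element, 2-char VR,
	// 2-byte length; for OB/OW/OF/SQ/UT/UN those 2 length bytes are reserved and a
	// 4-byte length follows (presumably handled inside readUnlimitedText here).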



	try
	{
		currentOffset = offset;
		found = false;
		type[2] = '\0';
		value = NULL;
		input.seekg(0,ifstream::end);
		size = input.tellg();
		input.seekg(offset,ifstream::beg);
		while( !input.eof()  &&  ( currentOffset = input.tellg() ) < size )
		{
			input.read((char*)&group,2);	// read group code
			input.read((char*)&element,2);	// read element code
			input.read(type,2);	// read value representation
			input.read((char*)&lengthShort,2);	// for "OB","OW","OF","SQ","UT","UN" these two bytes are unused; otherwise this is the Value Length

			// if the transfer syntax is Big Endian perform a byte swap
			if ( bigEndian )
			{
				group = _byteswap_ushort(group);
				element = _byteswap_ushort(element);
				lengthShort = _byteswap_ushort(lengthShort);
			}

			if ( group == target  &&  !found )	found = true;
			else if ( group != target  &&  found )	break;
		

			// if the value representation is one of the following
			if ( strcmp(type,"OB") == 0  ||  strcmp(type,"OW") == 0  ||  
				 strcmp(type,"OF") == 0  ||  strcmp(type,"SQ") == 0  ||  
				 strcmp(type,"UN") == 0  ||  strcmp(type,"UT") == 0 )
			{
				int	valueSize;


				// read unlimited text
				value = readUnlimitedText(input,type,false,bigEndian,&valueSize);
				saveInformation(group,element,valueSize,value,fileInfo);
				delete[] value;
				value = NULL;
			}
			else
			{
				if ( lengthShort > 0 )
				{
					// read data equal to the defined length
					value = new char[lengthShort];
					input.read(value,lengthShort);
					saveInformation(group,element,lengthShort,value,fileInfo);
					delete[] value;
					value = NULL;
				}
			}
		}
	}
	catch( exception& e )
	{
		if ( value != NULL )
			delete[] value;

		throw e;
	}



	return currentOffset;
}
unsigned long	readImplicitFile( ifstream& input , unsigned long offset , bool bigEndian , unsigned short target , DicomFileImage& fileInfo )
{
	unsigned long	currentOffset;
	unsigned long	size;
	unsigned short	group;
	unsigned short	element;
	unsigned int	lengthInt;
	char*			value;
	bool			found;
	

	
	try
	{
		currentOffset = offset;
		found = false;
		value = NULL;
		input.seekg(0,ifstream::end);
		size = input.tellg();
		input.seekg(offset,ifstream::beg);
		while( !input.eof()  &&  ( currentOffset = input.tellg() ) < size )
		{
			input.read((char*)&group,2);	// read group code
			input.read((char*)&element,2);	// read element code
			input.read((char*)&lengthInt,4);	// read value length

			// if the transfer syntax is Big Endian perform a byte swap
			if ( bigEndian )
			{
				group = _byteswap_ushort(group);
				element = _byteswap_ushort(element);
				lengthInt = _byteswap_ulong(lengthInt);
			}

			// termination condition update
			if ( group == target  &&  !found )	found = true;
			else if ( group != target  &&  found )	break;


			// if the length is undefined
			if ( lengthInt == 0xFFFFFFFF )
			{
				int	valueSize;



				// read unlimited text 
				value = readUnlimitedText(input,"",true,bigEndian,&valueSize);
				// update DicomFileImage information
				saveInformation(group,element,valueSize,value,fileInfo);
				delete[] value;
				value = NULL;
			}
			else	// if the length is defined
			{
				if ( lengthInt > 0 )
				{
					// read data of size equal to the defined length
					value = new char[lengthInt];
					input.read(value,lengthInt);
					saveInformation(group,element,lengthInt,value,fileInfo);
					delete[] value;
					value = NULL;
				}
			}
		}
	}
	catch ( exception &e )
	{
		if ( value != NULL )
			delete[] value;

		throw e;
	}



	return currentOffset;
}