/* Function buildGraph
   Takes properly formatted text input and builds the graph's data.
   Properly formatted means:
     the first line contains an int giving the number of nodes;
     it is followed by that many lines of node descriptions;
     then come any number of edge lines, each holding two space-delimited
     ints (the from node and the to node of an edge);
     input ends with the termination line "0 0".
*/
void GraphL::buildGraph(istream& infile) {
   int fromNode, toNode;              // from and to node ends of edge

   makeEmpty();                       // release any memory held by the current graph

   infile >> size;     // read the number of nodes

   if (infile.eof()) return;          // stop if no more data
   
   string s;                          // used to read through to end of line
   getline(infile, s);

   // read graph node information
   for (int i=1; i <= size; i++) {
      // read using setData of the NodeData class,
      // something like: 
      //    adjList[i].data.setData(infile);
      // where adjList is the array of GraphNodes and
      // data is the NodeData object inside of GraphNode
      if (i > MAXNODES-1) {
         getline(infile, s);          // too many nodes for the array: discard this description
      } else {
         data[i].setData(infile);     // store this node's description
      }
   }
   if (size > MAXNODES-1) {
      size = MAXNODES-1;              // clamp size to the array capacity
   }
   // read the edge data and add to the adjacency list
   for (;;) {
      infile >> fromNode >> toNode;
      if (fromNode == 0 && toNode == 0) return;   // end of edge data
      insertEdge(fromNode, toNode);               // insert the edge into the adjacency list for fromNode

   }
}
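For reference, a minimal input matching the format described in the header comment might look like the following (the node descriptions and edge pairs are made up for illustration): the first line gives the node count, then one description line per node, then "from to" edge pairs terminated by "0 0".

3
Node One
Node Two
Node Three
1 2
2 3
0 0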
Example #2
template <typename T>   // assumed: T is a template parameter of this function, required for list<T> below
void readlist (list<T> &mylist, istream &stream, bool &isunique) {
   try {
      for (;;) {
         try {
            string line;
            getline (stream, line);
            if (stream.eof() ) {
               break;
            }
            T elem = from_string<T> (line);
            if (isunique) {
               if ( !mylist.contains_elem (elem) ) mylist.push (elem);
            } else {
               mylist.push (elem);
            }
         } catch (const list_exn &exn) {
            // If there is a problem discovered in any function, an
            // exn is thrown and printed here.
            complain() << exn.what() << endl;
         }
      }
   } catch (const list_exit_exn &) {
   }
}
// menu prompts for and accepts an option selection from standard input and
// returns the character identifying the selected option
//
char menu(istream& is) {
    char c;
    bool ok = false;

    cout << '\n';
    cout << "Please select from the following options :\n";
    cout << " P - Place an order with a publisher\n";
    cout << " S - Place a special order with a publisher\n";
    cout << " A - Add one copy to an existing order\n";
    cout << " D - Record a delivery from a publisher\n";
    cout << " V - View status of books on order\n";
    cout << " Q - Quit\n";
    do {
        cout << " Your selection : ";
        c = ' ';
        is.get(c);
        if (c >= 'a' && c <= 'z')
            c -= 'a' - 'A';
        if (is.fail()) {
            is.clear();
            is.ignore(2000, '\n');
            cerr << " Invalid input.  Try again." << endl;
        } else if (c == '\n') {
            ; // no input - try again
        } else if (c != 'P' && c != 'S' && c != 'A' && c != 'D' && c != 'V'
         && c != 'Q') {
            is.ignore(2000, '\n');
            cerr << " Invalid Character.  Try again." << endl;
        } else if (is.get() != '\n') {
            is.ignore(2000, '\n');
            cerr << " Trailing Characters.  Try Again. " << endl;
        } else {
            ok = true;
        }
    } while (!ok);

    return c;
}
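A minimal sketch of how menu() could be driven from a caller, assuming the usual includes and using-directives of the surrounding program; the handler comments are placeholders, not functions from the original:

int main() {
    char choice = ' ';
    while (choice != 'Q') {
        choice = menu(cin);                 // returns a validated, upper-cased option
        switch (choice) {
            case 'P': /* place a publisher order */          break;
            case 'S': /* place a special order */            break;
            case 'A': /* add a copy to an existing order */  break;
            case 'D': /* record a delivery */                break;
            case 'V': /* view order status */                break;
            case 'Q': /* loop condition ends the program */  break;
        }
    }
    return 0;
}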
Example #4
void read_var_(istream& sf, int& fl, double& var, double* same_var, int& i)
{
  i = 0;
  double tmp = HUGE_VAL;
  sf >> ws;
  if (sf.peek() == '*')
  {
    fl = series::fixed;
    sf.ignore();
  }
  else if (sf.peek() == '#')
  {
    fl = series::same;
    sf.ignore();
    if (isdigit(sf.peek()))
    {
      sf >> i;
      if (i >= nvar) i = 0;
      sf.clear();
    }
Example #5
//Read the contents of a poll(gender, age, year, studies)
//Receives as parameter the stream from which it reads
Poll read_poll(istream& list_students) {
  char current_studies[SIZE];
  Poll current_poll;
  
  //Set gender as 1 if it is male and 0 otherwise
  current_poll.gender = (list_students.get() == 'M' ? 1 : 0);
  list_students.ignore(1); //Skip the ','
  
  list_students >> current_poll.age;
  list_students.ignore(1); //Skip the ','
  
  list_students.get(current_studies, SIZE, ',');
  current_poll.studies = string(current_studies);
  list_students.ignore(1); //Skip the ','
  
  list_students >> current_poll.year;
  list_students.ignore(1); //Skip the '\n'
  
  return current_poll;
}
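A single record in the stream this function expects would therefore look something like the line below (values invented for illustration): gender, age, studies, and year, comma-separated and ended by a newline.

M,21,Computer Science,3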
Example #6
void fillVector(vector<int> &v, istream &ist, char terminator)
{
	int x;
	while(ist >> x)
		v.push_back(x);
	if(ist.bad())
		error("Some unusual error occurred, stream is in bad state.");
	if(ist.eof())
		return;
	if(ist.fail()) {
		ist.clear();	// clear stream state
		char c;
		ist >> c;
		if(c == terminator) {
			cout << "found terminator\n";
			return;
		}
		ist.unget();
		ist.clear(ios_base::failbit);	// set the state to fail
	}
}
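A quick usage sketch, assuming <sstream> is available and that error() is whatever reporting helper the surrounding project provides:

istringstream iss("10 20 30 | 40");
vector<int> values;
fillVector(values, iss, '|');   // values now holds 10 20 30; '|' is recognised as the terminator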
Example #7
      void load(istream &in){

	 ulint w_size;

	 in.read((char*)&w_size,sizeof(w_size));

	 if(w_size>0){

	    words = vector<uint64_t>(w_size);
	    in.read((char*)words.data(),sizeof(uint64_t)*w_size);

	 }

	 in.read((char*)&psum_,sizeof(psum_));

	 in.read((char*)&MASK,sizeof(MASK));

	 in.read((char*)&size_,sizeof(size_));

	 in.read((char*)&width_,sizeof(width_));

	 in.read((char*)&int_per_word_,sizeof(int_per_word_));

      }
Example #8
void ImagePatch::readFromStreamBinary(istream& in) {

	char hasData;// = (char)(data != NULL);
	char hasIntegralData;// = (char) (integralData!=NULL);
	int dataRowWidth;


	in.read((char*)&width, sizeof(int));
	in.read((char*)&height, sizeof(int));
	in.read((char*)&dataRowWidth, sizeof(int));
	in.read(&hasData,sizeof(char));
	if (hasData) {
		imgData.create(height, width, CV_8U);
		unsigned char* data = imgData.data;
		in.read((char*)data, height*imageDataRowBytes());
	}
	in.read(&hasIntegralData,sizeof(char));
	if (hasIntegralData) {
		intData.create(height+1, width+1, CV_32S);
		Size s = getIntegralSize();
		integral_type* integralData = (integral_type*) intData.data;
		in.read((char*)integralData,s.height*integralDataRowBytes());
	}
}
void EndlineFilter::applyFilter(istream & inStream, ostream & outStream)
{
	while (inStream.good()) 
	{
		char currentLetter;
		currentLetter = inStream.get();
		while (inStream.good())
		{
			if (currentLetter != '\n')
			{
				outStream << currentLetter;
				currentLetter = inStream.get();
			}
			else
			{
				outStream << "<br>" << " ";
				currentLetter = inStream.get();
			}
		}
		inStream.unget();
	}
}
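A small usage sketch with string streams, assuming EndlineFilter is default-constructible as the class above appears to be:

istringstream in("first line\nsecond line\n");
ostringstream out;
EndlineFilter filter;
filter.applyFilter(in, out);   // out.str() == "first line<br> second line<br> "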
	void load(istream &in){

		ulint encode_size;
		ulint decode_size;

		in.read((char*)&encode_size,sizeof(encode_size));
		in.read((char*)&decode_size,sizeof(decode_size));

		for(ulint i=0;i<encode_size;++i){

			char_type c;
			in.read((char*)&c,sizeof(c));

			vector<bool> B;
			load_vec_bool(in,B);

			encode_.insert({c,B});

		}

		for(ulint i=0;i<decode_size;++i){

			vector<bool> B;
			load_vec_bool(in,B);

			char_type c;
			in.read((char*)&c,sizeof(c));

			decode_.insert({B,c});

		}

		in.read((char*)&sigma,sizeof(sigma));

		in.read((char*)&log_sigma,sizeof(log_sigma));

		in.read((char*)&enc_type,sizeof(enc_type));

	}
Example #11
  static void ReadString (istream & ist, char * str)
  {
    //char * hstr = str;
    char ch;

    for (;;)
      {
	ist.get(ch);
	if (!ist.good()) break;

	if (!isspace (ch))
	  {
	    ist.putback (ch);
	    break;
	  }
      }

    for (;;)
      {
	ist.get(ch);
	if (!ist.good()) break;
	if (isalpha(ch) || isdigit(ch))
	  {
	    *str = ch;
	    str++;
	  }
	else
	  {
	    ist.putback (ch);
	    break;
	  }
      }
    *str = 0;
    //  cout << "Read string (" << hstr << ")" 
    //       << "put back: " << ch << endl;
  }
int getFunctionInput(istream &fin, FunctionFormat &function)
{
	function.prepUp();
	fin.ignore(numeric_limits<streamsize>::max(), ':');
	if(fin.eof())
		return 1; //Could not find a function
	fin>>function.newname;
	if(!fin)
		return 1; //There was nothing after colon
	while(fin)
	{
		int temp;
		fin>>temp;
		if(fin)
			function.addReturn(temp);
	}
	fin.clear();
	while(fin.peek()!=':' && !fin.eof())
	{
		char temp_string[1000];
		fin.getline(temp_string, 1000);
		stringstream case_string_stream(temp_string);
		Case temp_case;
		case_string_stream>>temp_case.oldname;
		while(case_string_stream)
		{		
			int temp_int;
			case_string_stream>>temp_int;
			if(case_string_stream)
				temp_case.addArg(temp_int);
		}
		
		function.addCase(temp_case);
	}
	return 0;
}
Example #13
//---------------------------------------------------------------------------
void EclipseGridParser::readImpl(istream& is)
//---------------------------------------------------------------------------
{
    if (!is) {
        cerr << "Could not read given input stream." << endl;
        throw exception();
    }

    // Make temporary maps that will at the end be swapped with the
    // member maps
    // NOTE: Above is no longer true, for easier implementation of
    //       the INCLUDE keyword. We lose the strong exception guarantee,
    //       though (of course retaining the basic guarantee).
    map<string, vector<int> >& intmap = integer_field_map_;
    map<string, vector<double> >& floatmap = floating_field_map_;

    // Actually read the data
    std::string keyword;
    while (is.good()) {
        is >> ignoreWhitespace;
        bool ok = readKeyword(is, keyword);
        if (ok) {
            //#ifdef VERBOSE
            cout << "Keyword found: " << keyword << endl;
            //#endif
            FieldType type = classifyKeyword(keyword);
            // std::cout << "Classification: " << type << std::endl;
            switch (type) {
            case Integer: {
                readVectorData(is, intmap[keyword]);
                break;
            }
            case FloatingPoint: {
                readVectorData(is, floatmap[keyword]);
                break;
            }
            case Timestepping: {
                SpecialMap& sm = special_field_by_epoch_[current_epoch_];
                if (start_date_.is_not_a_date()) {
                    // Set it to START date, or default if no START.
                    // This will only ever happen in the first epoch,
                    // upon first encountering a timestepping keyword.
                    if (hasField("START")) {
                      start_date_ = getSTART().date;
                    } else {
                      start_date_ = boost::gregorian::date( 1983 , 1 , 1 );
                    }
                }
                if (current_reading_mode_ == Regular) {
                    current_reading_mode_ = Timesteps;
                }
                // Get current epoch's TSTEP, if it exists, create new if not.
                SpecialMap::iterator it = sm.find("TSTEP");
                TSTEP* tstep = 0;
                if (it != sm.end()) {
                    tstep = dynamic_cast<TSTEP*>(it->second.get());
                } else {
                    SpecialFieldPtr sb_ptr(new TSTEP());
                    tstep = dynamic_cast<TSTEP*>(sb_ptr.get());
                    sm["TSTEP"] = sb_ptr;
                }
                assert(tstep != 0);
                // Append new steps to current TSTEP object
                if (keyword == "TSTEP") {
                    const int num_steps_old = tstep->tstep_.size();
                    tstep->read(is); // This will append to the TSTEP object.
                    const double added_days
                        = std::accumulate(tstep->tstep_.begin() + num_steps_old, tstep->tstep_.end(), 0.0);
                    current_time_days_ += added_days;
                } else if (keyword == "DATES") {
                    DATES dates;
                    dates.read(is);
                    for (std::size_t dix = 0; dix < dates.dates.size(); ++dix) {
                        boost::gregorian::date_duration since_start = dates.dates[dix] - start_date_;
                        double step = double(since_start.days()) - current_time_days_;
                        tstep->tstep_.push_back(step);
                        current_time_days_ = double(since_start.days());
                    }
                } else {
                    OPM_THROW(std::runtime_error, "Keyword " << keyword << " cannot be handled here.");
                }
                break;
            }
            case SpecialField: {
                if (current_reading_mode_ == Timesteps) {
                    // We have been reading timesteps, but have
                    // now encountered something else.
                    // That means we are in a new epoch.
                    current_reading_mode_ = Regular;
                    special_field_by_epoch_.push_back(SpecialMap());
                    ++current_epoch_;
                    assert(int(special_field_by_epoch_.size()) == current_epoch_ + 1);
                    // Add clones of all existing special fields to new map.
                    SpecialMap& oldmap = special_field_by_epoch_[current_epoch_ - 1];
                    SpecialMap& newmap = special_field_by_epoch_[current_epoch_];
                    for (SpecialMap::iterator it = oldmap.begin(); it != oldmap.end(); ++it) {
                        // if (it->first != "TSTEP") {
                            newmap[it->first] = cloneSpecialField(it->first, it->second);
                            //}
                    }
                    //assert(newmap.count("TSTEP") == 0);
                }
                // Check if the keyword already exists. If so, append. Otherwise, create new.
                SpecialMap::iterator it = special_field_by_epoch_[current_epoch_].find(keyword);
                if (it != special_field_by_epoch_[current_epoch_].end()) {
                    it->second->read(is);
                } else {
                    SpecialFieldPtr sb_ptr = createSpecialField(is, keyword);
                    if (sb_ptr) {
                        special_field_by_epoch_[current_epoch_][keyword] = sb_ptr;
                    } else {
                        OPM_THROW(std::runtime_error, "Could not create field " << keyword);
                    }
                }
                break;
            }
            case IgnoreWithData: {
                ignored_fields_.insert(keyword);
                //is >> ignoreSlashLine;
                //#ifdef VERBOSE
                // cout << "(ignored)" << endl;
                //#endif
                break;
            }
            case IgnoreNoData: {
                ignored_fields_.insert(keyword);
                //is >> ignoreLine;
                //#ifdef VERBOSE
                // cout << "(ignored)" << endl;
                //#endif
                break;
            }
            case Include: {
                string include_filename = readString(is);
                if (!directory_.empty()) {
                    include_filename = directory_ + '/' + include_filename;
                }
                ifstream include_is(include_filename.c_str());
                if (!include_is) {
                    OPM_THROW(std::runtime_error, "Unable to open INCLUDEd file " << include_filename);
                }
                readImpl(include_is);
                //              is >> ignoreSlashLine;
                break;
            }
            case Import: {
                string import_filename = readString(is);
                if (!directory_.empty()) {
                    import_filename = directory_ + '/' + import_filename;
                }
                getNumericErtFields(import_filename);
                break;
            }
            case Unknown:
            default:
                ignored_fields_.insert(keyword);
                cout << "*** Warning: keyword " << keyword << " is unknown." << endl;
                //is >> ignoreSlashLine;
                //throw exception();
            }
        } else {
            // if (!ok)
            is >> ignoreLine;
        }
    }
}
Example #14
static bool getDump (istream&          ifile,
		     ostream&          ofile,
		     vector<Data2DF*>& u    )
// ---------------------------------------------------------------------------
// Read next set of field dumps from ifile, put headers on ofile.
// ---------------------------------------------------------------------------
{
  static const char* hdr_fmt[] = { 
    "%-25s "    "Session\n",
    "%-25s "    "Created\n",
    "%-25s "    "Nr, Ns, Nz, Elements\n",
    "%-25d "    "Step\n",
    "%-25.6g "  "Time\n",
    "%-25.6g "  "Time step\n",
    "%-25.6g "  "Kinvis\n",
    "%-25.6g "  "Beta\n",
    "%-25s "    "Fields written\n",
    "%-25s "    "Format\n"
  };
  char  buf[StrMax], fmt[StrMax], fields[StrMax];
  int_t i, j, swab, nf, np, nz, nel;

  if (ifile.getline(buf, StrMax).eof()) return false;
  
  if (!strstr (buf, "Session")) message (prog, "not a field file", ERROR);
  ofile << buf << endl;
  ifile.getline (buf, StrMax);
  ofile << buf << endl;

  // -- I/O Numerical description of field sizes.

  ifile >> np >> nz >> nz >> nel;
  ifile.getline (buf, StrMax);
  
  sprintf (fmt, "%1d %1d %1d %1d", np, np, nz, nel);
  sprintf (buf, hdr_fmt[2], fmt);
  ofile << buf;

  for (i = 0; i < 5; i++) {
   ifile.getline (buf, StrMax);
   ofile << buf << endl;
  }

  // -- I/O field names.

  ifile >> fields;
  nf = strlen (fields);
  for (j = 0, i = 0; i < nf; i++) fmt[j++] = fields[i];
  fmt[j] = '\0';
  sprintf (buf, hdr_fmt[8], fmt);
  ofile << buf;
  ifile.getline (buf, StrMax);

  // -- Arrange for byte-swapping if required.

  ifile.getline (buf, StrMax);

  swab = doSwap (buf);

  sprintf (buf, "binary ");
  Veclib::describeFormat (buf + strlen (buf));
  sprintf (fmt, hdr_fmt[9], buf);
  ofile << fmt;

  if (u.size() != nf) {
    u.resize (nf);
    for (i = 0; i < nf; i++) u[i] = new Data2DF (np, nz, nel, fields[i]);
  }

  for (i = 0; i < nf; i++) {
    ifile >> *u[i];
    if (swab) u[i] -> reverse();
  }

  return ifile.good();
}
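For orientation, the header section getDump expects to read (and re-emits to ofile) follows the layout given in hdr_fmt above; an illustrative header with made-up values could look like this, with binary field data following immediately after the Format line:

mysession                 Session
Mon Jan 01 12:00:00 2024  Created
9 9 16 64                 Nr, Ns, Nz, Elements
100                       Step
1.0                       Time
0.001                     Time step
0.01                      Kinvis
1                         Beta
uvwp                      Fields written
binary IEEE little-endian Format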
void 
readMesh  ( istream &is, chunkSize size,char *name )
{
	tab++;

	vector<Point> vertex;
	vector<TriangleIndex> triangle;

	while (1)
	{
		chunkSize subChunkSize = 0;
		chunkID   subChunkID = 0;

		if (!(subChunkID = readChunk(is,subChunkSize))) break;

		switch (subChunkID)
		{
		case 0x4110:						// Vertex List
			{
				unsigned short number;
				is.read((char *) (void *) &number,2);

				TAB(1);
				DOUT(1) << "vertex list: " << number << " points." << endl;

				char buffer[12];

				for (int n=0;n<number;n++)
				{
					is.read(buffer,12);
					vertex.push_back
					( 
						Point
						(
							Vector
							(
								*((float *) &buffer[0]),
								*((float *) &buffer[4]),
								*((float *) &buffer[8])
							),
							Vector0
						)

					);

					TAB(2);
					DOUT(2) << vertex.back() << endl;
				}
			}
			break;
		case 0x4120:						// Face List
			{
				unsigned short number;
				is.read((char *) (void *) &number,2);

				TAB(1);
				DOUT(1) << "face list: " << number << " faces." << endl;

				char buffer[8];

				for (int n=0;n<number;n++)
				{
					is.read(buffer,8);

					TriangleIndex tri;

					tri.a = *((unsigned short *) &buffer[0]);
					tri.b = *((unsigned short *) &buffer[2]);
					tri.c = *((unsigned short *) &buffer[4]);

					if (tri.a>=vertex.size() || tri.b>=vertex.size() || tri.c>=vertex.size())
						cerr << "Error: Face data out of range." << endl;
					else
					{
						triangle.push_back(tri);

						TAB(2);
						DOUT(2) << tri.a << ',' << tri.b << ',' << tri.c << endl;
					}
				}
			}
			break;
		case 0x4130:						// Face Material
			readMaterial(is,subChunkSize-6);
			break;
		default:
			eatChunk(is,subChunkSize-6);
		}

		if (!subChunkID || size<subChunkSize)
			break;

		size -= subChunkSize;
	}

	if (callback)
		callback(vertex,triangle,name);

	tab--;
}
Example #16
bool CPlayListASX::LoadData(istream& stream)
{
  CLog::Log(LOGNOTICE, "Parsing ASX");

  if(stream.peek() == '[')
  {
    return LoadAsxIniInfo(stream);
  }
  else
  {
    CXBMCTinyXML xmlDoc;
    stream >> xmlDoc;

    if (xmlDoc.Error())
    {
      CLog::Log(LOGERROR, "Unable to parse ASX info Error: %s", xmlDoc.ErrorDesc());
      return false;
    }

    TiXmlElement *pRootElement = xmlDoc.RootElement();

    // lowercase every element
    TiXmlNode *pNode = pRootElement;
    TiXmlNode *pChild = NULL;
    CStdString value;
    value = pNode->Value();
    value.ToLower();
    pNode->SetValue(value);
    while(pNode)
    {
      pChild = pNode->IterateChildren(pChild);
      if(pChild)
      {
        if (pChild->Type() == TiXmlNode::TINYXML_ELEMENT)
        {
          value = pChild->Value();
          value.ToLower();
          pChild->SetValue(value);

          TiXmlAttribute* pAttr = pChild->ToElement()->FirstAttribute();
          while(pAttr)
          {
            value = pAttr->Name();
            value.ToLower();
            pAttr->SetName(value);
            pAttr = pAttr->Next();
          }
        }

        pNode = pChild;
        pChild = NULL;
        continue;
      }

      pChild = pNode;
      pNode = pNode->Parent();
    }
    CStdString roottitle = "";
    TiXmlElement *pElement = pRootElement->FirstChildElement();
    while (pElement)
    {
      value = pElement->Value();
      if (value == "title")
      {
        roottitle = pElement->GetText();
      }
      else if (value == "entry")
      {
        CStdString title(roottitle);

        TiXmlElement *pRef = pElement->FirstChildElement("ref");
        TiXmlElement *pTitle = pElement->FirstChildElement("title");

        if(pTitle)
          title = pTitle->GetText();

        while (pRef)
        { // multiple references may appear for one entry
          // duration may exist on this level too
          value = pRef->Attribute("href");
          if (value != "")
          {
            if(title.IsEmpty())
              title = value;

            CLog::Log(LOGINFO, "Adding element %s, %s", title.c_str(), value.c_str());
            CFileItemPtr newItem(new CFileItem(title));
            newItem->SetPath(value);
            Add(newItem);
          }
          pRef = pRef->NextSiblingElement("ref");
        }
      }
      else if (value == "entryref")
      {
        value = pElement->Attribute("href");
        if (value != "")
        { // found an entryref, let's try loading that url
          auto_ptr<CPlayList> playlist(CPlayListFactory::Create(value));
          if (NULL != playlist.get())
            if (playlist->Load(value))
              Add(*playlist);
        }
      }
      pElement = pElement->NextSiblingElement();
    }
  }

  return true;
}
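For context, the XML flavour of ASX handled by the branch above looks roughly like the snippet below (URLs and titles are invented; element and attribute names are lower-cased by the code before they are inspected):

<asx version="3.0">
  <title>Example playlist</title>
  <entry>
    <title>First stream</title>
    <ref href="http://example.com/stream1.asf" />
  </entry>
  <entryref href="http://example.com/more.asx" />
</asx>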
Example #17
bool CPlayListASX::LoadAsxIniInfo(istream &stream)
{
  CLog::Log(LOGINFO, "Parsing INI style ASX");

  string name, value;

  while( stream.good() )
  {
    // consume blank rows, and blanks
    while((stream.peek() == '\r' || stream.peek() == '\n' || stream.peek() == ' ') && stream.good())
      stream.get();

    if(stream.peek() == '[')
    {
      // this is an [section] part, just ignore it
      while(stream.good() && stream.peek() != '\r' && stream.peek() != '\n')
        stream.get();
      continue;
    }
    name = "";
    value = "";
    // consume name
    while(stream.peek() != '\r' && stream.peek() != '\n' && stream.peek() != '=' && stream.good())
      name += stream.get();

    // consume =
    if(stream.get() != '=')
      continue;

    // consume value
    while(stream.peek() != '\r' && stream.peek() != '\n' && stream.good())
      value += stream.get();

    CLog::Log(LOGINFO, "Adding element %s=%s", name.c_str(), value.c_str());
    CFileItemPtr newItem(new CFileItem(value));
    newItem->SetPath(value);
    Add(newItem);
  }

  return true;
}
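The INI-style variant recognised by this method is just name=value lines, optionally preceded by [section] headers that the loop skips; a made-up example follows. Each value becomes the path of a new playlist item.

[Reference]
Ref1=http://example.com/stream1.asf
Ref2=http://example.com/stream2.asf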
void process(istream &input, ostream &output) {
    int f = 0;
    bool first = true;
    yuv4mpeg_proxy proxy(input, output);
    Mat mat_y[3];
    Mat mat_u[3];
    Mat mat_v[3];
    mat_y[0] = Mat::zeros(proxy.height(), proxy.width(), CV_8U);
    mat_u[0] = Mat::zeros(proxy.height(), proxy.width(), CV_8U);
    mat_v[0] = Mat::zeros(proxy.height(), proxy.width(), CV_8U);
    mat_y[1] = Mat::zeros(proxy.height(), proxy.width(), CV_8U);
    mat_u[1] = Mat::zeros(proxy.height(), proxy.width(), CV_8U);
    mat_v[1] = Mat::zeros(proxy.height(), proxy.width(), CV_8U);
    mat_y[2] = Mat::zeros(proxy.height(), proxy.width(), CV_8U);
    mat_u[2] = Mat::zeros(proxy.height(), proxy.width(), CV_8U);
    mat_v[2] = Mat::zeros(proxy.height(), proxy.width(), CV_8U);
    while(input.good() && output.good()) {
        int f_prev = (f+1)%2; //which is also the next to write...
        int f_now  = f;
        f = (f+1)%2;

        std::vector<unsigned char> frame;
        //for(int i = 0; i < 10; ++i) //skip
            frame = proxy.get();

        //copy frame into opencv..
        for(int y = 0, i = 0; y < proxy.height(); ++y)
        for(int x = 0; x < proxy.width(); ++x, i+=3) {
            mat_y[f_now].at<unsigned char>(y, x) = frame[i+0];
            mat_u[f_now].at<unsigned char>(y, x) = frame[i+1];
            mat_v[f_now].at<unsigned char>(y, x) = frame[i+2];
        }

        if (!first) {
            static int frame_nr = 0;
#define USE_FLOW_FIX
#define RESIZE
#ifdef RESIZE
            Mat mat_y_a_small;
            Mat mat_y_b_small;
#ifndef USE_FLOW_FIX
            int scaler = 3;
#else
            int scaler = 5;//8
#endif
            cv::resize(mat_y[f_prev], mat_y_a_small, Size(proxy.width()/scaler, proxy.height()/scaler));
            cv::resize(mat_y[f_now ], mat_y_b_small, Size(proxy.width()/scaler, proxy.height()/scaler));
            auto flow = dualOpticalFlow(mat_y_a_small, mat_y_b_small);
            std::get<0>(flow) = scaleOpticalFlow(std::get<0>(flow), mat_y[f_prev].size());
            std::get<1>(flow) = scaleOpticalFlow(std::get<1>(flow), mat_y[f_now ].size());
            //blur  ?
            blur_xy(std::get<0>(flow), scaler*2);
            blur_xy(std::get<1>(flow), scaler*2);
#else
            auto flow = dualOpticalFlow(mat_y[f_prev], mat_y[f_now]);
#endif

            //write first frame
            for(int y = 0, i = 0; y < proxy.height(); ++y)
            for(int x = 0; x < proxy.width(); ++x, i+=3) {
                frame[i+0] = mat_y[f_prev].at<unsigned char>(y, x);
                frame[i+1] = mat_u[f_prev].at<unsigned char>(y, x);
                frame[i+2] = mat_v[f_prev].at<unsigned char>(y, x);
            }
            proxy.set(frame);

            //write 50% interpolation
            int max_j = 2;
            for(int j = 1; j<max_j; ++j) {
                double f = j/double(max_j);

#ifdef USE_FLOW_FIX
                std::tuple<Mat, Mat> fixed_flow;
                std::tie(mat_y[2], fixed_flow) = dualTransformFlow_plusFix(mat_y[f_prev], mat_y[f_now], flow, nullptr    , f, 1.0-f, 1.0-f);
                std::tie(mat_u[2], fixed_flow) = dualTransformFlow_plusFix(mat_u[f_prev], mat_u[f_now], flow, &fixed_flow, f, 1.0-f, 1.0-f);
                std::tie(mat_v[2], fixed_flow) = dualTransformFlow_plusFix(mat_v[f_prev], mat_v[f_now], flow, &fixed_flow, f, 1.0-f, 1.0-f);
#else
                mat_y[2] = dualTransformFlow(mat_y[f_prev], mat_y[f_now], flow, f, 1.0-f, 1.0-f);
                mat_u[2] = dualTransformFlow(mat_u[f_prev], mat_u[f_now], flow, f, 1.0-f, 1.0-f);
                mat_v[2] = dualTransformFlow(mat_v[f_prev], mat_v[f_now], flow, f, 1.0-f, 1.0-f);
#endif

                for(int y = 0, i = 0; y < proxy.height(); ++y)
                    for(int x = 0; x < proxy.width(); ++x, i+=3) {
                        frame[i+0] = mat_y[2].at<unsigned char>(y, x);
                        frame[i+1] = mat_u[2].at<unsigned char>(y, x);
                        frame[i+2] = mat_v[2].at<unsigned char>(y, x);
                    }
                proxy.set(frame);
            }
        } else {
            first = false;
        }
    }
}
MStatus blindDoubleData::readBinary( istream& in, unsigned )
{
    in.read( (char*) &fValue, sizeof( fValue ));
    return in.fail() ? MS::kFailure : MS::kSuccess;
}
Example #20
void NifStream( Char8String & val, istream& in, const NifInfo & info ) {
	val.resize(8, '\x0');
	for (int i=0; i<8; ++i)
		in.read( &val[i], 1 );
}
Example #21
/// \brief Parse a FASTA format input into an alignment
///
/// At present, each sequences must contain all of the residues of the corresponding PDB
/// (because the index in the PDB is required in the alignment).
///
/// !!Case insensitive!!
///
/// \todo !URGENT! Test what this does when given structures with incomplete residues near the start.
///       It looks like it gives indices in the PDB, rather than in the protein (which only
///       contains backbone-complete residues). This is a serious issue!
///
/// \todo Generalise this so that it's possible to read alignments against a pdb_list or a protein_list
///
/// The code will attempt to handle missing residues with a warning if there are a small number.
/// It will fail if the percentage is too low.
///
/// \relates alignment
alignment cath::align::read_alignment_from_fasta(istream                  &arg_istream,          ///< The istream from which to read the FASTA input for parsing
                                                 const amino_acid_vec_vec &arg_amino_acid_lists, ///< TODOCUMENT
                                                 const str_vec            &arg_names,            ///< A vector of names, each of which should be found within the corresponding sequence's ID
                                                 ostream                  &arg_stderr            ///< An ostream to which any warnings should be output (currently unused)
                                                 ) {
		if ( arg_amino_acid_lists.empty() ) {
			BOOST_THROW_EXCEPTION(invalid_argument_exception("Cannot load a FASTA alignment with 0 PDB entries"));
		}
		const size_t num_entries = arg_amino_acid_lists.size();
		if ( arg_names.size() != num_entries ) {
			BOOST_THROW_EXCEPTION(invalid_argument_exception("Cannot load a FASTA alignment with a different number of names and PDB entries"));
		}

		arg_istream.exceptions( ios::badbit );

		try {
			const str_str_pair_vec sequence_of_id = read_ids_and_sequences_from_fasta( arg_istream );
			const size_t num_sequences = sequence_of_id.size();
			if ( num_entries != num_sequences ) {
				BOOST_THROW_EXCEPTION(runtime_error_exception(
					"Number of sequences parsed from FASTA ("
					+ lexical_cast<string>( num_sequences )
					+ ") doesn't match the number of PDBs/names ("
					+ lexical_cast<string>( num_entries   )
					+ ")"
				));
			}

			const size_t sequence_length = sequence_of_id.front().second.length();

			opt_aln_posn_vec_vec positions;
			positions.reserve( num_entries );
			for (size_t entry_ctr = 0; entry_ctr < num_entries; ++entry_ctr) {
				const amino_acid_vec &amino_acids     = arg_amino_acid_lists      [ entry_ctr ];
				const string         &name            = arg_names     [ entry_ctr ];
				const str_str_pair   &id_and_sequence = sequence_of_id[ entry_ctr ];
				const string         &id              = id_and_sequence.first;
				const string         &sequence        = id_and_sequence.second;

				if ( sequence.length() != sequence_length ) {
					BOOST_THROW_EXCEPTION(runtime_error_exception(
						"When attempting to parse entry number "
						+ lexical_cast<string>( entry_ctr + 1     )
						+ " of FASTA alignment, the length of the sequence ("
						+ lexical_cast<string>( sequence.length() )
						+ ") does not match the length of the first sequence ("
						+ lexical_cast<string>( sequence_length   )
						+ ")"
					));
				}

				if ( ! icontains( id, name ) ) {
					BOOST_THROW_EXCEPTION(runtime_error_exception(
						"When attempting to parse entry number "
						+ lexical_cast<string>( entry_ctr + 1 )
						+ " of FASTA alignment, name \""
						+ name
						+ "\" could not be found in a case-insensitive search within FASTA header ID \""
						+ id
						+ "\""
					));
				}

				positions.push_back( align_sequence_to_amino_acids( sequence, amino_acids, name, arg_stderr ) );
			}

			return alignment( positions );
		}
		// Catch any I/O exceptions
		catch (const std::exception &ex) {
			const string error_message(string("Cannot read FASTA legacy alignment file [") + ex.what() + "] ");
			perror(error_message.c_str());
			BOOST_THROW_EXCEPTION(runtime_error_exception(error_message));
		};
	}
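For orientation, the FASTA input this parser consumes is the usual header/sequence pairs, where each header ID must contain the corresponding entry of arg_names and all sequences must have equal length (IDs and sequences below are purely illustrative):

>1abcA01 some description
ACDEFG-HIKLM
>2defB02 some description
ACD--GAHIKLM

Gap characters ('-') mark alignment positions at which an entry has no residue.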
Example #22
void aol::Vector<_DataType>::loadRaw ( istream &in, const int Type ) {

  switch ( Type ) {
    case qc::PGM_UNSIGNED_CHAR_ASCII:
      for ( int i = 0; i < this->_size; ++i ) {
        int value;
        in >> value;
        if ( value < 0 || value > 255 )
          throw aol::Exception ( "aol::Vector<DataType>::loadRaw: Illegal ascii number for unsigned char", __FILE__, __LINE__ );
        this->_pData[i] = static_cast<unsigned char> ( value );
      }
      break;
    case qc::PGM_FLOAT_ASCII:
      for ( int i = 0; i < this->_size; ++i ) {
        in >> this->_pData[i];
      }
      break;
    case qc::PGM_UNSIGNED_CHAR_BINARY: {
      unsigned char *buffer = new unsigned char[this->_size];
      in.read ( reinterpret_cast<char*> ( buffer ),  this->_size );
      for ( int i = 0; i < this->_size; ++i ) this->_pData[i] = static_cast<DataType> ( static_cast<unsigned char> ( buffer[i] ) );
      delete[] buffer;
    }
    break;
    case qc::PGM_FLOAT_BINARY:
      if ( sizeof ( float ) == sizeof ( DataType ) ) {
        in.read ( reinterpret_cast<char*> ( this->_pData ), this->_size * sizeof ( float ) );
      } else {
        float *buffer = new float[ this->_size];
        in.read ( reinterpret_cast<char*> ( buffer ),  this->_size * sizeof ( float ) );
        for ( int i = 0; i < this->_size; ++i ) this->_pData[i] =  static_cast<DataType> ( buffer[i] );
        delete[] buffer;
      }
      break;
    case qc::PGM_DOUBLE_BINARY:
      if ( sizeof ( double ) == sizeof ( DataType ) ) {
        in.read ( reinterpret_cast<char*> ( this->_pData ),  this->_size * sizeof ( double ) );
      } else {
        double *buffer = new double[ this->_size];
        in.read ( reinterpret_cast<char*> ( buffer ),  this->_size * sizeof ( double ) );
        for ( int i = 0; i < this->_size; ++i ) this->_pData[i] =  static_cast<DataType> ( buffer[i] );
        delete[] buffer;
      }
      break;
    case qc::PGM_UNSIGNED_SHORT_BINARY: {
      unsigned short *buffer = new unsigned short[ this->_size];
      in.read ( reinterpret_cast<char*> ( buffer ),  this->_size * sizeof ( unsigned short ) );
      for ( int i = 0; i < this->_size; ++i ) this->_pData[i] = static_cast<DataType> ( static_cast<unsigned short> ( buffer[i] ) );
      delete[] buffer;
      break;
    }
    case qc::PGM_SHORT_BINARY: {
      short *buffer = new short[ this->_size];
      in.read ( reinterpret_cast<char*> ( buffer ),  this->_size * sizeof ( short ) );
      for ( int i = 0; i < this->_size; ++i ) this->_pData[i] = static_cast<DataType> ( buffer[i] );
      delete[] buffer;
      break;
    }
    case qc::PGM_UNSIGNED_INT_BINARY: {
      aol::readBinaryData<uint32_t, DataType> ( in, this->_pData, this->_size );
      break;
    }
    case qc::PGM_SIGNED_INT_BINARY: {
      aol::readBinaryData<int32_t, DataType> ( in, this->_pData, this->_size );
      break;
    }
    default:
      throw aol::TypeException ( "Illegal PGM_TYPE", __FILE__, __LINE__ );
  }
  if ( in.fail() )
    throw aol::FileException ( "aol::Vector<DataType>::loadRaw: Reading from istream failed", __FILE__, __LINE__ );
}
Example #23
void read_series(istream& df, const string &name)
{
  char ch;
  vec_convert vc;
  series t;
  while (df)
  {
    t.init();
    vc.clear();
    df >> ws;
    if (df.peek() == '*')
    {
      t.hide = true;
      df.ignore();
    }
    if (hide_series)
      t.hide = true;
    df >> ws;
    ch = get_token(df, t.ID);
    if (isspace(ch))
      ch = skip_until(df);
    if (ch != ',')
      continue;
    df >> ws;
    string str;
    ch = get_token(df, str);
    try
    {
      t.f = FindResidual(str);
    }
    catch (gError) 
    {
      cout << NOEQ << t.ID << " - " << str << " -  series ignored" << endl;
      while (ch = df.get(), ch != ';' &&
                            ch != EOF);
      continue;
    }
    parser p(df, name);
    SGML el;
    if (isspace(ch))
      while (df >> ws, ch = df.peek(), ch == '<')
      {
        p.GetSGML(el);
        el.compare("var");
        str = el.FindString("name");
        if (!str.empty())
        {
          t.vs.push_back(str);
          t.vx.push_back(el.FindDouble("value"));
        }
      }
    if (ch != ',')
      ch = skip_until(df);
    if (ch == ',')
      df.ignore();
    else
      continue;
    while (df >> ws, ch = df.peek(), ch == '<')
    {
      vc.push_back(convert());
      vc.back().read(p.GetSGML(el));
    }
    if (ch == ',')
      df.ignore();
    t.read(df, 0, t.f.NameOfX(), t.f.ScaleOfX());
    if (!t)
      continue;
    if (vc.empty())
      ser.push_back(t);
    else
    {
      series t1;
      t1.hide = t.hide;
      t1.ID = t.ID;
      t1.f = t.f;
      t1.vs = t.vs;
      t1.vx = t.vx;
      t1.set(t.Ntot(), vc.size());
      for (size_t i = 0; i < vc.size(); ++i)
      {
        if (vc[i].name().empty())
          throw gError("read_series: no name while converting");
        else
          t1.name(i) = vc[i].name();
      }
      t1.scale() = t.scale();
      t1.NOfX() = SearchString(t1.names(), t1.f.NameOfX());
      for (size_t i = 0; i < vc.size(); ++i)
        vc[i].SetID(t.names());
      for (size_t i = 0; i < t.Ntot(); ++i)
      {
        t1.atr(i) = t.atr(i);
        for (size_t j = 0; j < vc.size(); ++j)
          t1(i, j) = vc[j](t(i));
      }
      t1.set_av();
      ser.push_back(t1);
    }
    ser.back().f.SetInput(ser.back().names());
    ser.back().f.SetOnceInput(ser.back().vs);
  }
}
Example #24
void geometry::scan_body(istream& f, INT geo_nr, BYTE *geo_label)
{
	INT v, b, i, j, a, a1, l;
	BYTE buf[MYBUFSIZE];
	BYTE *p_str;
	
	v = -1;
	X().m_mn(0, 0);
	point_labels().m_l(0);
	block_labels().m_l(0);
	f_row_decomp() = FALSE;
	f_col_decomp() = FALSE;
	f_ddp() = FALSE;
	f_ddb() = FALSE;
	f_canonical_labelling_points() = FALSE;
	f_canonical_labelling_blocks() = FALSE;
	f_aut_gens() = FALSE;
	aut_gens().m_l(0);
	ago().m_i_i(0);

	number() = geo_nr;
	label().init(geo_label);
	cout << "reading GEOMETRY " << number() << " " << label() << endl;
	
	while (TRUE) {
		if (f.eof()) {
			cout << "geometry::scan() primature end of file" << endl;
			exit(1);
			}

		f.getline(buf, sizeof(buf));
		if (strncmp(buf, "v=", 2) == 0) {
			sscanf(buf, "v=%ld b=%ld", &v, &b);
			point_labels().m_l(v);
			for (i = 0; i < v; i++) {
				point_labels().m_ii(i, i);
				}
			block_labels().m_l(b);
			for (i = 0; i < b; i++) {
				block_labels().m_ii(i, i);
				}
			}
		else if (strncmp(buf, "INCIDENCE_MATRIX", 16) == 0) {
			// cout << "reading INCIDENCE_MATRIX" << endl;
			X().m_mn_n(v, b);
			for (i = 0; i < v; i++) {
				if (f.eof()) {
					cout << "geometry::scan() primature end of file" << endl;
					exit(1);
					}

				f.getline(buf, sizeof(buf));
				for (j = 0; j < b; j++) {
					if (buf[j] == 'X') {
						X().m_iji(i, j, 1);
						}
					}
				}
			f_incidence_matrix() = TRUE;
			}
		else if (strncmp(buf, "INTEGER_MATRIX", 16) == 0) {
			// cout << "reading INTEGER_MATRIX" << endl;
			X().m_mn_n(v, b);
			for (i = 0; i < v; i++) {
				if (f.eof()) {
					cout << "geometry::scan() primature end of file" << endl;
					exit(1);
					}
				for (j = 0; j < b; j++) {
					f >> a;
					X().m_iji(i, j, a);
					}
				}
			f_incidence_matrix() = FALSE;
			}
		else if (strncmp(buf, "LABELLING_OF_POINTS", 19) == 0) {
Example #25
void SquareMaze::ReadActualMaze(istream& inData)
{
        int x,y;
        int col,row; // in file
        bool onWallX,onWallY;
        char ch; // current character from file
        bool isBoundary;

        for (row=0; row<2*height+1; row++) {
                for (col=0; col<2*width+1; col++) {

                        inData.get(ch);

                        // for convenience, the square we're on, 
                        // or the first square down and to the right, if we're on a wall
                        x=col/2;
                        y=row/2;
                        
                        onWallX=(col%2)==0;
                        onWallY=(row%2)==0;

                        // check for boundary of the maze
                        isBoundary=false;
                        if (!onWallX || !onWallY) {
                                // check for boundaries, but don't do it on diagonal walls,
                                // simply because in those cases we could end up dereferencing an
                                // illegal part of the array (e.g. where col==2*width and row==0, we would index an illegal location for the row==0 case)
                                if (col==0) {
                                        getNode(x,y)->canGoDirs[Left]=false;
                                        isBoundary=true;
                                }
                                if (row==0) {
                                        getNode(x,y)->canGoDirs[Up]=false;
                                        isBoundary=true;
                                }
                                if (col==2*width) {
                                        getNode(x-1,y)->canGoDirs[Right]=false;
                                        isBoundary=true;
                                }
                                if (row==2*height) {
                                        getNode(x,y-1)->canGoDirs[Down]=false;
                                        isBoundary=true;
                                }
                        }

                        if (isBoundary) {
                                // we've already dealt with the boundaries of the maze
                        }
                        else {
                                // there are 4 cases for if we're on a wall or not
                                if (onWallX) {
                                        if (onWallY) {
                                                // we're on a diagonal wall, which is meaningless
                                                // so, ignore this
                                        }
                                        else {
                                                // we're on an X-wall
                                                switch (ch) {
                                                case ' ':
                                                        // no wall, so there's nothing to do
                                                        break;
                                                default:
                                                        // just act like it's a wall
                                                case '|':
                                                        // an X-wall is here
                                                        getNode(x-1,y)->canGoDirs[Right]=false;
                                                        getNode(x,y)->canGoDirs[Left]=false;
                                                        break;
                                                }
                                        }
                                }
                                else {
                                        if (onWallY) {
                                                // we're on a Y-wall
                                                switch (ch) {
                                                case ' ':
                                                        // no wall, so there's nothing to do
                                                        break;
                                                default:
                                                        // just act like it's a wall
                                                case '-':
                                                        // an Y-wall is here
                                                        getNode(x,y-1)->canGoDirs[Down]=false;
                                                        getNode(x,y)->canGoDirs[Up]=false;
                                                        break;
                                                }
                                        }
                                        else {
                                                // we're in a square (not on a wall)
                                                // check if it's the start or exit square
                                                switch (ch) {
                                                case '*':
                                                        if (startNode!=NULL) {
                                                                cerr << "SquareMaze WARNING: multiple start nodes.  Ignoring the earlier one\n";
                                                        }
                                                        startNode=getNode(x,y);
                                                        break;
                                                case 'X':
                                                        if (exitNode!=NULL) {
                                                                cerr << "SquareMaze WARNING: multiple exit nodes.  Ignoring the earlier one\n";
                                                        }
                                                        exitNode=getNode(x,y);
                                                        break;
                                                case ' ':
                                                        // this is the expected case, so do nothing
                                                        break;
                                                default:
                                                        cerr << "SquareMaze WARNING: character in cell of maze must be *,X or space.  Character is at x,y coord (" << x << "," << y << ")\n";
                                                        break;
                                                }
                                        }
                                }
                        }
                }

                // read extra return ('\n')
                inData.get(ch);
                if (ch=='\r') {
                        // gracefully handle binary mode files on Windows
                        inData.get(ch);
                }
        }

        if (startNode==NULL || exitNode==NULL) {
                cerr << "SquareMaze WARNING: maze lacks start node or exit node.\n";
        }
}
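As a concrete illustration of the text layout this routine parses (assuming width and height have already been set to 2 elsewhere), a 2x2 maze occupies (2*width+1) columns by (2*height+1) rows; '*' is the start square, 'X' the exit, '|' and '-' are walls, and a space at a wall position means the passage is open:

+-+-+
|* |
+ +-+
|  X|
+-+-+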
Example #26
void PlotManager::processCommand(istream& in)
{
  string buffer, cmd;
  while ( ! in.eof() ) {
    buffer.clear();
    cmd.clear();
    getline(in, buffer);

    // Extract command from buffer
    stringstream ss(buffer); ss >> cmd;
    if ( cmd.empty() || cmd[0] == '#' ) continue;
    int (*pf)(int)=std::toupper;
    transform(cmd.begin(), cmd.end(), cmd.begin(), pf);

    buffer.erase(0, cmd.size()+1);

    typedef escaped_list_separator<char> elsc;
    tokenizer<elsc> tokens(buffer, elsc("\\", " \t", "\""));

    vector<tokenizer<elsc>::value_type> args;

    for(tokenizer<elsc>::const_iterator tok_iter = tokens.begin();
        tok_iter != tokens.end(); ++tok_iter) {
      args.push_back(*tok_iter);
    }

    if ( cmd == "EFFICIENCY" && args.size() >= 4 ) {
      if ( !saveEfficiency(args[0], args[1], args[2], args[3]) ) {
        cerr << "Error : cannot make efficiency plot" << endl;
      }
      continue;
    }

    if ( cmd == "BAYESIANEFFICIENCY" && args.size() >= 4 ) {
      if ( !saveBayesEfficiency(args[0], args[1], args[2], args[3]) ) {
        cerr << "Error : cannot make bayesian efficiency plot" << endl;
      }
      continue;
    }
    
    if ( cmd == "FAKERATE" && args.size() == 4 ) {
      if ( !saveFakeRate(args[0], args[1], args[2], args[3]) ) {
        cerr << "Error : cannot make fakerate plot" << endl;
      }
      continue;
    }
    
    if ( cmd == "RESOLUTIONX" && args.size() == 3 ) {
      if ( !saveResolution(args[0], args[1], args[2], 'X') ) {
        cerr << "Error : cannot make resolution-X plot" << endl;
      }
      continue;
    }
    
    if ( cmd == "RESOLUTIONY" && args.size() == 3 ) {
      if ( !saveResolution(args[0], args[1], args[2], 'Y') ) {
        cerr << "Error : cannot make resolution-Y plot" << endl;
      }
      continue;
    }
    
    if ( cmd == "DUMP" && args.size() == 3 ) {
      if ( !dumpObject(args[0], args[1], args[2]) ) {
        cerr << "Error : cannot copy histogram" << endl;
      }
      continue;
    }

    cerr << "Unknown command <" << cmd << ">" << endl;

  } 
}
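A sketch of the kind of command file processCommand consumes, inferred from the argument counts checked above; the argN placeholders are hypothetical, since their meaning depends on the save* helpers. Each line's remaining text is split on whitespace, honouring double quotes and backslash escapes, and command names are case-insensitive.

# lines starting with '#' are comments
efficiency    arg1 arg2 arg3 arg4
resolutionX   arg1 arg2 arg3
dump          arg1 arg2 arg3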
Example #27
/// \brief TODOCUMENT
///
/// \relates alignment
///
/// CORA file format
///
///  The header consists of the following
///  - One format line '#FM CORA_FORMAT 1.1'
///  - Any number of comment lines '#CC'
///  - Total number of proteins in the alignment
///  - All CATH domain names in the alignment
///  - Total number of alignment positions
///
/// For example:
///
///     #FM CORA_FORMAT 1.1
///     #CC
///     #CC Any number of comment lines (200 characters max per line)
///     #CC
///     #CC
///     3
///     6insE0 1igl00 1bqt00
///     73
///
/// The body consists of the following:
///
///          START       PROT 1     PROT 2     PROT N         END
///     <------------><---------><---------><---------><---------------->
///     ddddxddddxddddxddddcxcxxcxddddcxcxxcxddddcxcxxcxxxcxddddxddddxxdd
///
///        1    0    1    0  0  0    1  A  0    0  0  0   0    0    0   0
///        2    0    1    0  0  0    2  Y  0    0  0  0   0    0    0   0
///        3    0    2    1B F  0    3  R  0    0  0  0   0    0    0   0
///        4    0    3    2B V  H    4  P  0    1  G  0   0    1    0   2
///        5    1    3    3B N  H    5  S  0    2  P  0   0    1    0   6
///        6    0    3    4B Q  H    6  E  0    3  E  0   0    1    0   2
///
/// START (14 characters) :
///   - Column 1: Alignment Position (dddd)
///   - Column 2: No. of position selected for structural template (dddd)
///   - Column 3: No. of proteins aligned at this position (dddd)
///
/// PROT 1,2,3... (11 characters per protein)
///   - Column 4 (7,10 etc): Residue number in PDB file (ddddc) 4 digit number
///   -    + 1 character insert code
///   -    Importantly the insert code is always in the same position not within
///   -    the 4 characters reserved for the pdb number (see below)
///   - Column 5 (8,11 etc): Amino Acid Code (c)
///   - Column 6 (9,12 etc): Secondary Structure Assignment (c)
///
/// END (18 characters)
///   - Last Column-3: Consensus Secondary Structure Assignment (c)
///   - Last Column-2: No. of alpha residues at this position (dddd)
///   - Last Column-1: No. of beta  residues at this position (dddd)
///   - Last Column: Structural Conservation Score (dd)
alignment cath::align::read_alignment_from_cath_cora_legacy_format(istream        &arg_istream, ///< TODOCUMENT
                                                                   const pdb_list &arg_pdbs,    ///< TODOCUMENT
                                                                   ostream        &arg_stderr   ///< TODOCUMENT
                                                                   ) {
	const size_t CHARS_IN_MAIN_DATA_LINE_START = 14;
	const size_t CHARS_IN_MAIN_DATA_LINE_PROT  = 11;
	const size_t CHARS_IN_MAIN_DATA_LINE_END   = 18;

	if (arg_pdbs.empty()) {
		BOOST_THROW_EXCEPTION(invalid_argument_exception("Cannot load a CORA legacy alignment with 0 PDB entries"));
	}

	arg_istream.exceptions(ios::badbit);
	try {
		residue_name_vec_vec residue_names_of_first_chains;
		for (const pdb &arg_pdb : arg_pdbs) {
			residue_names_of_first_chains.push_back( arg_pdb.get_residue_names_of_first_chain__backbone_unchecked() );
		}

		// Check the first line is the file format line
		string line_string;
		getline(arg_istream, line_string);
		if (!starts_with(line_string, "#FM CORA_FORMAT ")) {
			BOOST_THROW_EXCEPTION(runtime_error_exception("No CORA header file format line"));
		}

		// Skip any comment lines
		while (getline(arg_istream, line_string) && starts_with(line_string, "#CC")) {
		}

		// Grab the number of proteins and ensure the alignment matches
		const size_t num_proteins = lexical_cast<size_t>(line_string);
		if (num_proteins != arg_pdbs.size()) {
			BOOST_THROW_EXCEPTION(invalid_argument_exception("Number of PDBs in CORA file is " + lexical_cast<string>(num_proteins) + ", which does not match " + lexical_cast<string>(arg_pdbs.size())));
		}
		const size_t num_chars_in_main_data_line = CHARS_IN_MAIN_DATA_LINE_START + (num_proteins * CHARS_IN_MAIN_DATA_LINE_PROT) + CHARS_IN_MAIN_DATA_LINE_END;

		// Grab the protein names
		getline( arg_istream, line_string );
		trim( line_string );
		const str_vec names = split_build<str_vec>( line_string, is_space() );
		if ( names.size() != num_proteins ) {
			BOOST_THROW_EXCEPTION(runtime_error_exception("Splitting on space does not give " + lexical_cast<string>(num_proteins) + " entries in CORA alignment names line: \"" + line_string + "\""));
		}

		// Grab the total number of alignment positions
		getline(arg_istream, line_string);
		const size_t num_positions = lexical_cast<size_t>(line_string);

		// Prepare the data structures to populate
		aln_posn_vec posns( num_proteins, 0 );
		opt_score_vec scores;
		scores.reserve( num_positions );
		opt_aln_posn_vec_vec data( num_proteins );
		for (opt_aln_posn_vec &data_col : data) {
			data_col.reserve( num_positions );
		}
		// Loop over the main data section
		while (getline(arg_istream, line_string)) {
			// Check the line is of the correct length
			if (line_string.length() != num_chars_in_main_data_line) {
				BOOST_THROW_EXCEPTION(runtime_error_exception("Number of characters in main data line does not equal " + lexical_cast<string>(num_chars_in_main_data_line)));
			}

			// Grab the global details from start of this line
			const size_t      alignment_posn = lexical_cast<size_t>( trim_copy( line_string.substr(  0, 4 ))); // Column 1: Alignment Position (dddd)
//			const size_t num_entries_in_temp = lexical_cast<size_t>( trim_copy( line_string.substr(  5, 4 ))); // Column 2: No. of position selected for structural template (dddd)
			const size_t num_entries_in_posn = lexical_cast<size_t>( trim_copy( line_string.substr( 10, 4 ))); // Column 3: No. of proteins aligned at this position (dddd)

			if (alignment_posn != data.front().size() + 1) {
				BOOST_THROW_EXCEPTION(runtime_error_exception("Alignment position counter " + lexical_cast<string>(alignment_posn) + " does not match " + lexical_cast<string>(data.front().size() + 1)));
			}

			// Loop over the indices of the proteins
			size_t num_present_posns(0);
			for (size_t prot_ctr = 0; prot_ctr < num_proteins; ++prot_ctr) {
				// Prepare string and other data for this protein
				const size_t        prot_string_offset = CHARS_IN_MAIN_DATA_LINE_START + prot_ctr * CHARS_IN_MAIN_DATA_LINE_PROT;
				const string        prot_string        = line_string.substr( prot_string_offset, CHARS_IN_MAIN_DATA_LINE_PROT );
				opt_aln_posn_vec   &data_col           = data [ prot_ctr ];
				aln_posn_type       &posn              = posns[ prot_ctr ];

				// Grab the the details for this protein
				const int              residue_num = lexical_cast<int>(          trim_copy( prot_string.substr(  1, 4 ))); // Column 4 (7,10 etc): Residue number in PDB file (ddddc) 4 digit number
				const char             insert_code =                                        prot_string.at(      5    )  ; //    + 1 character insert code
				const char              amino_acid =                                        prot_string.at(      7    )  ; // Column 5 (8,11 etc): Amino Acid Code (c)
//				const char               sec_struc =                                        prot_string.at(     10    )  ; // Column 6 (9,12 etc): Secondary Structure Assignment (c)

				// Find the residue in the list of this PDB's residue names
				const residue_name_vec &residues_names = residue_names_of_first_chains[ prot_ctr ];
				const residue_name      res_name       = make_residue_name_with_non_insert_char( residue_num, insert_code, ' ' );
				const opt_aln_posn      find_result    = search_for_residue_in_residue_names(
					posn,
					residues_names,
					amino_acid,
					res_name,
					arg_stderr
				);
				data_col.push_back( find_result ? opt_aln_posn( ( *find_result ) + 1 ) : opt_aln_posn( none ) );
				if ( find_result ) {
					posn = *find_result;
					++num_present_posns;
				}
			}
			if (num_present_posns != num_entries_in_posn) {
				BOOST_THROW_EXCEPTION(runtime_error_exception(
						"Number of positions for alignment_posn " + lexical_cast<string>(alignment_posn)
						+ " was " + lexical_cast<string>(num_present_posns)
						+ " not " + lexical_cast<string>(num_entries_in_posn)
				));
			}

			// Prepare the string for the global details at the end of this line
			const size_t end_string_offset = CHARS_IN_MAIN_DATA_LINE_START + num_proteins * CHARS_IN_MAIN_DATA_LINE_PROT;
			const string end_string = line_string.substr( end_string_offset, CHARS_IN_MAIN_DATA_LINE_END );

			// Grab the global details from start of this line
//			const size_t      cons_sec_struc =                                         end_string.at(      3    )  ; // Last Column-3: Consensus Secondary Structure Assignment (c)
//			const size_t       num_alpha_res = lexical_cast<size_t>( trim_copy(  end_string.substr(  5, 4 ))); // Last Column-2: No. of alpha residues at this position (dddd)
//			const size_t        num_beta_res = lexical_cast<size_t>( trim_copy(  end_string.substr( 10, 4 ))); // Last Column-1: No. of beta residues at this position (dddd)
			const size_t          cons_score = lexical_cast<size_t>( trim_copy(  end_string.substr( 16, 2 ))); // Last Column: Structural Conservation Score (dd)

			scores.push_back( numeric_cast<double>( cons_score ) );
//			// If there are multiple entries in this position then store the score
//			if (num_entries_in_posn > 1) {
////				cerr << "Adding score for " << alignment_posn-1 << endl;
//				scores.push_back(cons_score);
//			}
		}

		if ( num_positions != data.front().size() ) {
			BOOST_THROW_EXCEPTION(runtime_error_exception(
				"CORA legacy alignment number of positions was "
				+ lexical_cast<string>( data.front().size() )
				+ " not "
				+ lexical_cast<string>( num_positions )
			) );
		}

		alignment new_alignment = alignment_offset_1_factory( data );

		// Create a scores matrix and then empty any cells that are absent from the alignment
		opt_score_vec_vec all_scores( new_alignment.num_entries(), scores );
		for (size_t entry = 0; entry < new_alignment.num_entries(); ++entry) {
			for (size_t index = 0; index < new_alignment.length(); ++index) {
				if ( ! has_position_of_entry_of_index( new_alignment, entry, index ) ) {
					all_scores[ entry ][ index ] = none;
				}
			}
		}
		set_scores( new_alignment, all_scores);
		return new_alignment;
	}
	// Catch any I/O exceptions
	catch (const std::exception &ex) {
		const string error_message(string("Cannot read CORA legacy alignment file [") + ex.what() + "] ");
		perror(error_message.c_str());
		BOOST_THROW_EXCEPTION(runtime_error_exception(error_message));
	};
}
Example #28
/// Writes all available data from \p is.
void ostream::read (istream& is)
{
    write (is.ipos(), is.remaining());
    is.seek (is.size());
}
bool OptimizableGraph::load(istream& is, bool createEdges)
{
  // scan for the parameters in the whole file
  if (!_parameters.read(is,&_renamedTypesLookup))
    return false;
#ifndef NDEBUG
  cerr << "Loaded " << _parameters.size() << " parameters" << endl;
#endif
  is.clear();
  is.seekg(ios_base::beg);
  set<string> warnedUnknownTypes;
  stringstream currentLine;
  string token;

  Factory* factory = Factory::instance();
  HyperGraph::GraphElemBitset elemBitset;
  elemBitset[HyperGraph::HGET_PARAMETER] = 1;
  elemBitset.flip();

  Vertex* previousVertex = 0;
  Data* previousData = 0;

  while (1) {
    int bytesRead = readLine(is, currentLine);
    if (bytesRead == -1)
      break;
    currentLine >> token;
    //cerr << "Token=" << token << endl;
    if (bytesRead == 0 || token.size() == 0 || token[0] == '#')
      continue;

    // handle commands encoded in the file
    bool handledCommand = false;
    
    if (token == "FIX") {
      handledCommand = true;
      int id;
      while (currentLine >> id) {
        OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(vertex(id));
        if (v) {
#        ifndef NDEBUG
          cerr << "Fixing vertex " << v->id() << endl;
#        endif
          v->setFixed(true);
        } else {
          cerr << "Warning: Unable to fix vertex with id " << id << ". Not found in the graph." << endl;
        }
      }
    }

    if (handledCommand)
      continue;
     
    // do the mapping to an internal type if it matches
    if (_renamedTypesLookup.size() > 0) {
      map<string, string>::const_iterator foundIt = _renamedTypesLookup.find(token);
      if (foundIt != _renamedTypesLookup.end()) {
        token = foundIt->second;
      }
    }

    if (! factory->knowsTag(token)) {
      if (warnedUnknownTypes.count(token) != 1) {
        warnedUnknownTypes.insert(token);
        cerr << CL_RED(__PRETTY_FUNCTION__ << " unknown type: " << token) << endl;
      }
      continue;
    }

    HyperGraph::HyperGraphElement* element = factory->construct(token, elemBitset);
    if (dynamic_cast<Vertex*>(element)) { // it's a vertex type
      //cerr << "it is a vertex" << endl;
      previousData = 0;
      Vertex* v = static_cast<Vertex*>(element);
      int id;
      currentLine >> id;
      bool r = v->read(currentLine);
      if (! r)
        cerr << __PRETTY_FUNCTION__ << ": Error reading vertex " << token << " " << id << endl;
      v->setId(id);
      if (!addVertex(v)) {
        cerr << __PRETTY_FUNCTION__ << ": Failure adding Vertex, " << token << " " << id << endl;
        delete v;
      } else {
        previousVertex = v;
      }
    }
Example #30
//LineString
void NifStream( LineString & val, istream& in, const NifInfo & info ) {
	char tmp[256];
	in.getline( tmp, 256 );
	val.line = tmp;
}