date_type
    parse_undelimited_date(const std::string& s) {
      int offsets[] = {4,2,2};
      int pos = 0;
      typedef typename date_type::year_type year_type;
      //typename date_type::ymd_type ymd((year_type::min)(),1,1);
      unsigned short y = 0, m = 0, d = 0;

      /* The two bool arguments state that parsing will not wrap
       * (only the first 8 characters will be parsed) and partial
       * strings will not be parsed.
       * Ex:
       * "2005121" will parse 2005 & 12, but not the "1" */
      boost::offset_separator osf(offsets, offsets+3, false, false);

      typedef typename boost::tokenizer<boost::offset_separator,
                                        std::basic_string<char>::const_iterator,
                                        std::basic_string<char> > tokenizer_type;
      tokenizer_type tok(s, osf);
      for(typename tokenizer_type::iterator ti=tok.begin(); ti!=tok.end();++ti) {
        unsigned short i = boost::lexical_cast<unsigned short>(*ti);
        switch(pos) {
        case 0: y = i; break;
        case 1: m = i; break;
        case 2: d = i; break;
        default:       break;
        }
        pos++;
      }
      return date_type(y,m,d);
    }
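
A minimal usage sketch (an assumption, not part of the snippet above): with boost::gregorian::date in the role of date_type, the same undelimited format can be exercised through the gregorian from_undelimited_string wrapper.

#include <boost/date_time/gregorian/gregorian.hpp>
#include <iostream>

int main()
{
    // offsets {4,2,2}: "20051231" -> year 2005, month 12, day 31
    boost::gregorian::date d = boost::gregorian::from_undelimited_string("20051231");
    std::cout << d << std::endl; // prints 2005-Dec-31
    return 0;
}
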
 inline
 time_duration
 parse_undelimited_time_duration(const std::string& s)
 {
   int offsets[] = {2,2,2};
   int pos = 0, sign = 0;
   int hours = 0;
   short min=0, sec=0;
   // increment one position if the string was "signed"
   if(s.at(sign) == '-')
   {
     ++sign;
   }
   // stlport choked when passing s.substr() to tokenizer
   // using a new string fixed the error
   std::string remain = s.substr(sign);
   boost::offset_separator osf(offsets, offsets+3); 
   boost::tokenizer<boost::offset_separator> tok(remain, osf);
   for(boost::tokenizer<boost::offset_separator>::iterator ti=tok.begin(); ti!=tok.end();++ti){
     switch(pos) {
     case 0: 
       {
         hours = boost::lexical_cast<int>(*ti); 
         break;
       }
     case 1: 
       {
         min = boost::lexical_cast<short>(*ti); 
         break;
       }
     case 2:
       {
         sec = boost::lexical_cast<short>(*ti);
         break;
       }
     };
     pos++;
   }
   if(sign) {
     return -time_duration(hours, min, sec);
   }
   else {
     return time_duration(hours, min, sec);
   }
 }
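
To see the tokenizing step above in isolation, here is a minimal self-contained sketch (the input "-013045", i.e. -(1h 30m 45s), is an assumed value chosen for illustration); with the default offset_separator arguments the three 2-character fields come back in order once the sign is stripped.

#include <boost/tokenizer.hpp>
#include <iostream>
#include <string>

int main()
{
    std::string s = "-013045";            // a "signed" undelimited duration
    std::string remain = s.substr(1);     // drop the leading '-', as above
    int offsets[] = {2, 2, 2};
    boost::offset_separator osf(offsets, offsets + 3);
    boost::tokenizer<boost::offset_separator> tok(remain, osf);
    for(boost::tokenizer<boost::offset_separator>::iterator ti = tok.begin(); ti != tok.end(); ++ti)
    {
        std::cout << *ti << "\n";         // prints "01", "30", "45"
    }
    return 0;
}
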
Example #3
 date_type
 parse_undelimited_date(const std::string& s) {
   int offsets[] = {4,2,2};
   int pos = 0;
   typedef typename date_type::year_type year_type;
   typename date_type::ymd_type ymd(year_type::min(),1,1);
   boost::offset_separator osf(offsets, offsets+3);
   boost::tokenizer<boost::offset_separator> tok(s, osf);
   for(boost::tokenizer<boost::offset_separator>::iterator ti=tok.begin(); ti!=tok.end();++ti) {
     unsigned short i = boost::lexical_cast<unsigned short>(*ti);
     //      std::cout << i << std::endl;
     switch(pos) {
     case 0: ymd.year = i; break;
     case 1: ymd.month = i; break;
     case 2: ymd.day = i; break;
     }
     pos++;
   }
   return date_type(ymd);
 }
  inline
  time_duration
  parse_undelimited_time_duration(const std::string& s)
  {
    int precision = 0;
    {
      // msvc wouldn't compile 'time_duration::num_fractional_digits()'
      // (it required a template argument list); as a workaround, a temp
      // time_duration object was used
      time_duration tmp(0,0,0,1);
      precision = tmp.num_fractional_digits();
    }
    // 'precision+1' is so we grab all digits, plus the decimal
    int offsets[] = {2,2,2, precision+1};
    int pos = 0, sign = 0;
    int hours = 0;
    short min=0, sec=0;
    boost::int64_t fs=0;
    // increment one position if the string was "signed"
    if(s.at(sign) == '-')
    {
      ++sign;
    }
    // stlport choked when passing s.substr() to tokenizer
    // using a new string fixed the error
    std::string remain = s.substr(sign);
    /* We do not want the offset_separator to wrap the offsets; we
     * will never want to process more than:
     * 2 chars, 2 chars, 2 chars, frac_sec length.
     * We *do* want the offset_separator to give us a partial token for the
     * last field if not enough characters were provided in the input string. */
    bool wrap_off = false;
    bool ret_part = true;
    boost::offset_separator osf(offsets, offsets+4, wrap_off, ret_part); 
    typedef boost::tokenizer<boost::offset_separator,
                             std::basic_string<char>::const_iterator,
                             std::basic_string<char> > tokenizer;
    typedef boost::tokenizer<boost::offset_separator,
                             std::basic_string<char>::const_iterator,
                             std::basic_string<char> >::iterator tokenizer_iterator;
    tokenizer tok(remain, osf);
    for(tokenizer_iterator ti=tok.begin(); ti!=tok.end();++ti){
      switch(pos) {
        case 0: 
          {
            hours = boost::lexical_cast<int>(*ti); 
            break;
          }
        case 1: 
          {
            min = boost::lexical_cast<short>(*ti); 
            break;
          }
        case 2: 
          {
            sec = boost::lexical_cast<short>(*ti); 
            break;
          }
        case 3:
          {
            std::string char_digits(ti->substr(1)); // digits w/no decimal
            int digits = static_cast<int>(char_digits.length());
            
            // Works around a bug in the MSVC 6 library that does not support
            // operator>>, which means lexical_cast will fail to compile.
#if (defined(BOOST_MSVC) && (_MSC_VER <= 1200))  // 1200 == VC++ 6.0
            // _atoi64 is an MS specific function
            if(digits >= precision) {
              // drop excess digits
              fs = _atoi64(char_digits.substr(0, precision).c_str());
            }
            else if(digits == 0) {
              fs = 0; // just in case _atoi64 doesn't like an empty string
            }
            else {
              fs = _atoi64(char_digits.c_str());
            }
#else
            if(digits >= precision) {
              // drop excess digits
              fs = boost::lexical_cast<boost::int64_t>(char_digits.substr(0, precision));
            }
            else if(digits == 0) {
              fs = 0; // lexical_cast doesn't like empty strings
            }
            else {
              fs = boost::lexical_cast<boost::int64_t>(char_digits);
            }
#endif
            if(digits < precision){
              // trailing zeros get dropped from the string, so
              // "1:01:01.1" would yield .000001 instead of .100000;
              // the power() call compensates for the missing decimal places
              fs *= power(10, precision - digits); 
            }
            
            break;
          }
        default: break;
      };
      pos++;
    }
    if(sign) {
      return -time_duration(hours, min, sec, fs);
    }
    else {
      return time_duration(hours, min, sec, fs);
    }
  }
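
The fractional-second handling in case 3 can be illustrated on its own. A minimal sketch, assuming microsecond resolution (num_fractional_digits() == 6, the usual default for posix_time::time_duration): a lone ".1" must be padded to 100000 fractional ticks rather than read as 1.

#include <boost/date_time/posix_time/posix_time.hpp>
#include <iostream>
#include <string>

int main()
{
    using boost::posix_time::time_duration;
    int precision = time_duration(0,0,0,1).num_fractional_digits(); // typically 6
    std::string char_digits = "1";       // ".1" with the decimal point stripped
    long long fs = 1;                    // what lexical_cast would return
    for(int i = static_cast<int>(char_digits.length()); i < precision; ++i)
    {
        fs *= 10;                        // pad the missing decimal places
    }
    std::cout << time_duration(1, 1, 1, fs) << std::endl; // 01:01:01.100000
    return 0;
}
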
void Foam::sixDofTopoMotion::addZonesAndModifiers()
{
    // Add zones and modifiers for motion action

    if (useTopoSliding_)
    {
        if
        (
            pointZones().size() > 0
         || faceZones().size() > 0
         || cellZones().size() > 0
        )
        {
            Info<< "void sixDofTopoMotion::addZonesAndModifiers() : "
                << "Zones and modifiers already present.  Skipping."
                << endl;

            if (topoChanger_.size() == 0)
            {
                FatalErrorIn
                (
                    "void sixDofTopoMotion::addZonesAndModifiers()"
                )   << "Mesh modifiers not read properly"
                    << abort(FatalError);
            }

            return;
        }

        Info<< "Time = " << time().timeName() << endl
            << "Adding zones and modifiers to the mesh" << endl;

        // Add zones
        List<pointZone*> pz(3*bodies_.size());
        List<faceZone*> fz(3*bodies_.size());
        List<cellZone*> cz(0);

        label npz = 0;
        label nfz = 0;
        label nSliders = 0;

        forAll (bodies_, bodyI)
        {
            const floatingBody& curBody = bodies_[bodyI];

            if
            (
                curBody.hullSlider().active()
             && curBody.fixedSlider().active()
            )
            {
                nSliders++;

                // Add an empty zone for cut points
                pz[npz] = new pointZone
                (
                    curBody.name() + "CutPointZone",
                    labelList(0),
                    npz,
                    pointZones()
                );
                npz++;

                // Do face zones for slider

                // Inner slider
                const polyPatch& innerSlider =
                    boundaryMesh()[curBody.hullSlider().index()];

                labelList isf(innerSlider.size());

                forAll (isf, i)
                {
                    isf[i] = innerSlider.start() + i;
                }

                fz[nfz] = new faceZone
                (
                    curBody.name() + "InsideSliderZone",
                    isf,
                    boolList(innerSlider.size(), false),
                    nfz,
                    faceZones()
                );
                nfz++;

                // Outer slider
                const polyPatch& outerSlider =
                    boundaryMesh()[curBody.fixedSlider().index()];

                labelList osf(outerSlider.size());

                forAll (osf, i)
                {
                    osf[i] = outerSlider.start() + i;
                }

                fz[nfz] = new faceZone
                (
                    curBody.name() + "OutsideSliderZone",
                    osf,
                    boolList(outerSlider.size(), false),
                    nfz,
                    faceZones()
                );
                nfz++;

                // Add empty zone for cut faces
                fz[nfz] = new faceZone
                (
                    curBody.name() + "CutFaceZone",
                    labelList(0),
                    boolList(0, false),
                    nfz,
                    faceZones()
                );
                nfz++;
            }