Example no. 1
// Based on: Samuel R. Buss and Jin-Su Kim, "Selectively Damped Least Squares for Inverse Kinematics", Journal of Graphics Tools, 2005.
rk_result_t Robot::selectivelyDampedLeastSquaresIK_chain(const vector<size_t> &jointIndices, VectorXd &jointValues,
                                              const Isometry3d &target, const Isometry3d &finalTF)
{
    return RK_SOLVER_NOT_READY;
    // FIXME: Make this work


    // Arbitrary constant for maximum angle change in one step
    gammaMax = M_PI/4; // TODO: Put this in the constructor so the user can change it at a whim


    vector<Linkage::Joint*> joints;
    joints.resize(jointIndices.size());
    // FIXME: Add in safety checks
    for(int i=0; i<joints.size(); i++)
        joints[i] = joints_[jointIndices[i]];

    // ~~ Declarations ~~
    MatrixXd J;
    JacobiSVD<MatrixXd> svd;
    Isometry3d pose;
    AngleAxisd aagoal;
    AngleAxisd aastate;
    Vector6d goal;
    Vector6d state;
    Vector6d err;
    Vector6d alpha;
    Vector6d N;
    Vector6d M;
    Vector6d gamma;
    VectorXd delta(jointValues.size());
    VectorXd tempPhi(jointValues.size());
    // ~~~~~~~~~~~~~~~~~~

//    cout << "\n\n" << endl;

    tolerance = 1*M_PI/180; // TODO: Put this in the constructor so the user can set it arbitrarily
    maxIterations = 1000; // TODO: Put this in the constructor so the user can set it arbitrarily

    size_t iterations = 0;
    do {

        values(jointIndices, jointValues);

        jacobian(J, joints, joints.back()->respectToRobot().translation()+finalTF.translation(), this);

        svd.compute(J, ComputeFullU | ComputeThinV);

    //    cout <<  "\n\n" << svd.matrixU() << "\n\n\n" << svd.singularValues().transpose() << "\n\n\n" << svd.matrixV() << endl;

    //    for(int i=0; i<svd.matrixU().cols(); i++)
    //        cout << "u" << i << " : " << svd.matrixU().col(i).transpose() << endl;


    //    std::cout << "Joint name: " << joint(jointIndices.back()).name()
    //              << "\t Number: " << jointIndices.back() << std::endl;
        pose = joint(jointIndices.back()).respectToRobot()*finalTF;

    //    std::cout << "Pose: " << std::endl;
    //    std::cout << pose.matrix() << std::endl;

    //    AngleAxisd aagoal(target.rotation());
        aagoal = target.rotation();
        goal << target.translation(), aagoal.axis()*aagoal.angle();

        aastate = pose.rotation();
        state << pose.translation(), aastate.axis()*aastate.angle();

        err = goal-state;

    //    std::cout << "state: " << state.transpose() << std::endl;
    //    std::cout << "err: " << err.transpose() << std::endl;

        for(int i=0; i<6; i++)
            alpha[i] = svd.matrixU().col(i).dot(err);

    //    std::cout << "Alpha: " << alpha.transpose() << std::endl;

        for(int i=0; i<6; i++)
        {
            N[i] = svd.matrixU().block(0,i,3,1).norm();
            N[i] += svd.matrixU().block(3,i,3,1).norm();
        }

    //    std::cout << "N: " << N.transpose() << std::endl;

        double tempMik = 0;
        for(int i=0; i<svd.matrixV().cols(); i++)
        {
            M[i] = 0;
            for(int k=0; k<svd.matrixU().cols(); k++)
            {
                tempMik = 0;
                for(int j=0; j<svd.matrixV().cols(); j++)
                    tempMik += fabs(svd.matrixV()(j,i))*J(k,j);
                M[i] += 1/svd.singularValues()[i]*tempMik;
            }
        }

    //    std::cout << "M: " << M.transpose() << std::endl;

        for(int i=0; i<svd.matrixV().cols(); i++)
            gamma[i] = minimum(1, N[i]/M[i])*gammaMax;

    //    std::cout << "Gamma: " << gamma.transpose() << std::endl;

        delta.setZero();
        for(int i=0; i<svd.matrixV().cols(); i++)
        {
    //        std::cout << "1/sigma: " << 1/svd.singularValues()[i] << std::endl;
            tempPhi = 1/svd.singularValues()[i]*alpha[i]*svd.matrixV().col(i);
    //        std::cout << "Phi: " << tempPhi.transpose() << std::endl;
            clampMaxAbs(tempPhi, gamma[i]);
            delta += tempPhi;
    //        std::cout << "delta " << i << ": " << delta.transpose() << std::endl;
        }

        clampMaxAbs(delta, gammaMax);

        jointValues += delta;

        std::cout << iterations << " | Norm:" << delta.norm() << "\tdelta: "
                  << delta.transpose() << "\tJoints:" << jointValues.transpose() << std::endl;

        iterations++;
    } while(delta.norm() > tolerance && iterations < maxIterations);
}
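The solver above calls two helpers, minimum() and clampMaxAbs(), that are not included in this snippet. A minimal sketch of what they presumably look like, assuming Eigen vector types and the clamping described by Buss and Kim (rescale the whole vector whenever its largest absolute component exceeds the cap):

#include <Eigen/Core>
#include <algorithm>

// Hypothetical helpers, inferred from how the SDLS solver uses them.
static inline double minimum(double a, double b) { return std::min(a, b); }

// Rescale v so its largest absolute component is at most cap, preserving direction.
static inline void clampMaxAbs(Eigen::VectorXd &v, double cap)
{
    double maxAbs = v.cwiseAbs().maxCoeff();
    if (maxAbs > cap)
        v *= cap / maxAbs;
}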
Example no. 2
 void MethodTable::setup(STATE, size_t sz = 0) {
   if(!sz) sz = METHODTABLE_MIN_SIZE;
   values(state, Tuple::create(state, sz));
   bins(state, Fixnum::from(sz));
   entries(state, Fixnum::from(0));
 }
Example no. 3
// FIXME - most of the old code for this was encapsulated in EntityTree, I liked that design from a data
// hiding and object oriented perspective. But that didn't really allow us to handle the case of lots
// of entities being deleted at the same time. I'd like to look to move this back into EntityTree but
// for now this works and addresses the bug.
int EntityServer::sendSpecialPackets(const SharedNodePointer& node, OctreeQueryNode* queryNode, int& packetsSent) {
    int totalBytes = 0;

    EntityNodeData* nodeData = static_cast<EntityNodeData*>(node->getLinkedData());
    if (nodeData) {

        quint64 deletedEntitiesSentAt = nodeData->getLastDeletedEntitiesSentAt();
        quint64 considerEntitiesSince = EntityTree::getAdjustedConsiderSince(deletedEntitiesSentAt);

        quint64 deletePacketSentAt = usecTimestampNow();
        EntityTreePointer tree = std::static_pointer_cast<EntityTree>(_tree);
        auto recentlyDeleted = tree->getRecentlyDeletedEntityIDs();

        packetsSent = 0;

        // create a new special packet
        std::unique_ptr<NLPacket> deletesPacket = NLPacket::create(PacketType::EntityErase);

        // pack in flags
        OCTREE_PACKET_FLAGS flags = 0;
        deletesPacket->writePrimitive(flags);

        // pack in sequence number
        auto sequenceNumber = queryNode->getSequenceNumber();
        deletesPacket->writePrimitive(sequenceNumber);

        // pack in timestamp
        OCTREE_PACKET_SENT_TIME now = usecTimestampNow();
        deletesPacket->writePrimitive(now);

        // figure out where we are now and pack a temporary number of IDs
        uint16_t numberOfIDs = 0;
        qint64 numberOfIDsPos = deletesPacket->pos();
        deletesPacket->writePrimitive(numberOfIDs);

        // we keep a multi map of entity IDs to timestamps, we only want to include the entity IDs that have been
        // deleted since we last sent to this node
        auto it = recentlyDeleted.constBegin();
        while (it != recentlyDeleted.constEnd()) {

            // if the timestamp is more recent than our last sent time, include it
            if (it.key() > considerEntitiesSince) {

                // get all the IDs for this timestamp
                const auto& entityIDsFromTime = recentlyDeleted.values(it.key());

                for (const auto& entityID : entityIDsFromTime) {

                    // check to make sure we have room for one more ID, if we don't have more
                    // room, then send out this packet and create another one
                    if (NUM_BYTES_RFC4122_UUID > deletesPacket->bytesAvailableForWrite()) {

                        // replace the count for the number of included IDs
                        deletesPacket->seek(numberOfIDsPos);
                        deletesPacket->writePrimitive(numberOfIDs);

                        // Send the current packet
                        queryNode->packetSent(*deletesPacket);
                        auto thisPacketSize = deletesPacket->getDataSize();
                        totalBytes += thisPacketSize;
                        packetsSent++;
                        DependencyManager::get<NodeList>()->sendPacket(std::move(deletesPacket), *node);

                        #ifdef EXTRA_ERASE_DEBUGGING
                            qDebug() << "EntityServer::sendSpecialPackets() sending packet packetsSent[" << packetsSent << "] size:" << thisPacketSize;
                        #endif


                        // create another packet
                        deletesPacket = NLPacket::create(PacketType::EntityErase);

                        // pack in flags
                        deletesPacket->writePrimitive(flags);

                        // pack in sequence number
                        sequenceNumber = queryNode->getSequenceNumber();
                        deletesPacket->writePrimitive(sequenceNumber);

                        // pack in timestamp
                        deletesPacket->writePrimitive(now);

                        // figure out where we are now and pack a temporary number of IDs
                        numberOfIDs = 0;
                        numberOfIDsPos = deletesPacket->pos();
                        deletesPacket->writePrimitive(numberOfIDs);
                    }

                    // FIXME - we still seem to see cases where incorrect EntityIDs get sent from the server
                    // to the client. These were causing "lost" entities like flashlights and laser pointers
                    // now that we keep around some additional history of the erased entities and resend that
                    // history for a longer time window, these entities are not "lost". But we haven't yet
                    // found/fixed the underlying issue that caused bad UUIDs to be sent to some users.
                    deletesPacket->write(entityID.toRfc4122());
                    ++numberOfIDs;

                    #ifdef EXTRA_ERASE_DEBUGGING
                        qDebug() << "EntityTree::encodeEntitiesDeletedSince() including:" << entityID;
                    #endif
                } // end for (ids)

            } // end if (it.val > sinceLast)


            ++it;
        } // end while

        // replace the count for the number of included IDs
        deletesPacket->seek(numberOfIDsPos);
        deletesPacket->writePrimitive(numberOfIDs);

        // Send the current packet
        queryNode->packetSent(*deletesPacket);
        auto thisPacketSize = deletesPacket->getDataSize();
        totalBytes += thisPacketSize;
        packetsSent++;
        DependencyManager::get<NodeList>()->sendPacket(std::move(deletesPacket), *node);
        #ifdef EXTRA_ERASE_DEBUGGING
            qDebug() << "EntityServer::sendSpecialPackets() sending packet packetsSent[" << packetsSent << "] size:" << thisPacketSize;
        #endif

        nodeData->setLastDeletedEntitiesSentAt(deletePacketSentAt);
    }

    #ifdef EXTRA_ERASE_DEBUGGING
        if (packetsSent > 0) {
            qDebug() << "EntityServer::sendSpecialPackets() sent " << packetsSent << "special packets of " 
                        << totalBytes << " total bytes to node:" << node->getUUID();
        }
    #endif

    // TODO: caller is expecting a packetLength, what if we send more than one packet??
    return totalBytes;
}
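The EntityErase packet header (flags, sequence number, timestamp, and a placeholder ID count whose stream position is remembered so it can be patched later) is written twice in the function above: once up front and once whenever the current packet fills up. A possible refactoring sketch, using only the NLPacket and OctreeQueryNode calls already present in the snippet:

#include <memory>

// Hypothetical helper: start a fresh EntityErase packet and write its header.
// numberOfIDs and numberOfIDsPos are reset so the caller can patch the count later.
static std::unique_ptr<NLPacket> startErasePacket(OctreeQueryNode* queryNode,
                                                  OCTREE_PACKET_FLAGS flags,
                                                  OCTREE_PACKET_SENT_TIME now,
                                                  uint16_t& numberOfIDs,
                                                  qint64& numberOfIDsPos) {
    auto packet = NLPacket::create(PacketType::EntityErase);
    packet->writePrimitive(flags);
    auto sequenceNumber = queryNode->getSequenceNumber();
    packet->writePrimitive(sequenceNumber);
    packet->writePrimitive(now);
    numberOfIDs = 0;
    numberOfIDsPos = packet->pos();
    packet->writePrimitive(numberOfIDs);
    return packet;
}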
Example no. 4
//---------------------------------------------------------------------------//
// Test templates
//---------------------------------------------------------------------------//
TEUCHOS_UNIT_TEST( MCSolver, solve )
{
    typedef Tpetra::Vector<double,int,long> VectorType;
    typedef MCLS::VectorTraits<VectorType> VT;
    typedef Tpetra::CrsMatrix<double,int,long> MatrixType;
    typedef MCLS::MatrixTraits<VectorType,MatrixType> MT;
    typedef std::mt19937 rng_type;
    typedef MCLS::AdjointTally<VectorType> TallyType;
    typedef MCLS::AlmostOptimalDomain<VectorType,MatrixType,rng_type,TallyType> DomainType;
    typedef MCLS::UniformAdjointSource<DomainType> SourceType;

    Teuchos::RCP<const Teuchos::Comm<int> > comm = 
	Teuchos::DefaultComm<int>::getComm();
    int comm_size = comm->getSize();

    int local_num_rows = 10;
    int global_num_rows = local_num_rows*comm_size;
    Teuchos::RCP<const Tpetra::Map<int,long> > map = 
	Tpetra::createUniformContigMap<int,long>( global_num_rows, comm );

    // Build the linear system. This operator is symmetric with a spectral
    // radius less than 1.
    Teuchos::RCP<MatrixType> A = Tpetra::createCrsMatrix<double,int,long>( map );
    Teuchos::Array<long> global_columns( 3 );
    Teuchos::Array<double> values( 3 );
    global_columns[0] = 0;
    global_columns[1] = 1;
    global_columns[2] = 2;
    values[0] = 1.0/comm_size;
    values[1] = -0.14/comm_size;
    values[2] = 0.0/comm_size;
    A->insertGlobalValues( 0, global_columns(), values() );
    for ( int i = 1; i < global_num_rows-1; ++i )
    {
	global_columns[0] = i-1;
	global_columns[1] = i;
	global_columns[2] = i+1;
	values[0] = -0.14/comm_size;
	values[1] = 1.0/comm_size;
	values[2] = -0.14/comm_size;
	A->insertGlobalValues( i, global_columns(), values() );
    }
    global_columns[0] = global_num_rows-3;
    global_columns[1] = global_num_rows-2;
    global_columns[2] = global_num_rows-1;
    values[0] = 0.0/comm_size;
    values[1] = -0.14/comm_size;
    values[2] = 1.0/comm_size;
    A->insertGlobalValues( global_num_rows-1, global_columns(), values() );
    A->fillComplete();

    // Build the LHS. Put a large positive number here to be sure we
    // clear the vector before solving.
    Teuchos::RCP<VectorType> x = MT::cloneVectorFromMatrixRows( *A );
    VT::putScalar( *x, 100.0 );

    // Build the RHS with negative numbers. This gives us a negative
    // solution.
    Teuchos::RCP<VectorType> b = MT::cloneVectorFromMatrixRows( *A );
    VT::putScalar( *b, -2.0 );

    // Create the solver.
    Teuchos::RCP<Teuchos::ParameterList> plist = 
	Teuchos::rcp( new Teuchos::ParameterList() );
    plist->set<int>("MC Check Frequency", 10);
    plist->set<bool>("Reproducible MC Mode",true);
    plist->set<std::string>("Transport Type", "Global" );
    MCLS::MCSolver<SourceType> solver( comm, comm->getRank(), plist );

    // Build the adjoint domain.
    plist->set<int>( "Overlap Size", 2 );
    Teuchos::RCP<DomainType> domain = Teuchos::rcp( new DomainType( A, x, *plist ) );

    // Create the adjoint source with a set number of histories.
    int mult = 100;
    plist->set<double>("Sample Ratio",mult);
    Teuchos::RCP<SourceType> source = Teuchos::rcp(
	new SourceType( b, domain, *plist ) );

    // Set the Domain.
    solver.setDomain( domain );

    // Set the Source.
    solver.setSource( source );

    // Solve the problem.
    solver.solve();

    // Check that we got a negative solution.
    Teuchos::ArrayRCP<const double> x_view = 
        VT::view( *x );
    typename Teuchos::ArrayRCP<const double>::const_iterator x_view_it;
    for ( x_view_it = x_view.begin(); x_view_it != x_view.end(); ++x_view_it )
    {
	TEST_ASSERT( *x_view_it < Teuchos::ScalarTraits<double>::zero() );
    }

    // Now solve the problem with a positive source.
    VT::putScalar( *b, 2.0 );
    solver.setSource( source );
    solver.solve();
    for ( x_view_it = x_view.begin(); x_view_it != x_view.end(); ++x_view_it )
    {
	TEST_ASSERT( *x_view_it > Teuchos::ScalarTraits<double>::zero() );
    }

    // Reset the domain and solve again with a positive source.
    solver.setDomain( domain );
    solver.setSource( source );
    solver.solve();
    for ( x_view_it = x_view.begin(); x_view_it != x_view.end(); ++x_view_it )
    {
	TEST_ASSERT( *x_view_it > Teuchos::ScalarTraits<double>::zero() );
    }

    // Reset both and solve with a negative source.
    VT::putScalar( *b, -2.0 );
    solver.setDomain( domain );
    solver.setSource( source );
    solver.solve();
    for ( x_view_it = x_view.begin(); x_view_it != x_view.end(); ++x_view_it )
    {
	TEST_ASSERT( *x_view_it < Teuchos::ScalarTraits<double>::zero() );
    }
}
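A quick sanity check on the comment about the spectral radius: after fillComplete() the per-rank contributions (each divided by comm_size) sum to a tridiagonal operator with 1 on the diagonal and -0.14 on the off-diagonals. By Gershgorin's theorem its eigenvalues satisfy

\[ |\lambda - 1| \le 0.28, \]

so A is symmetric positive definite and the iteration matrix \(H = I - A\) has spectral radius at most 0.28 < 1, which is presumably the property the comment is pointing at.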
Example no. 5
bool KalmanFilter::writeBiasFile(const File& iFcst,
      const File& iObs,
      int iTimeStep,
      const ParameterFile* iDbIn,
      ParameterFile* iDbOut,
      ParameterFile* iBiasFile) {
   vec2 flats = iFcst.getLats();
   vec2 flons = iFcst.getLons();
   vec2 felevs = iFcst.getElevs();
   vec2 olats = iObs.getLats();
   vec2 olons = iObs.getLons();
   vec2 oelevs = iObs.getElevs();

   // Which forecast index are we using to update?
   vec2Int I,J;
   Downscaler::getNearestNeighbour(iFcst, iObs, I, J);

   // Loop over locations
   for(int oi = 0; oi < iObs.getNumLat(); oi++) {
      for(int oj = 0; oj < iObs.getNumLon(); oj++) {
         Location obsLoc(olats[oi][oj], olons[oi][oj], oelevs[oi][oj]);
         int Inearest = I[oi][oj];
         int Jnearest = J[oi][oj];
         float oelev = oelevs[oi][oj];
         float felev = felevs[Inearest][Jnearest];

         // Compute the most recent bias
         float elevCorrection = mElevGradient * (oelev - felev);
         float fcst = (*iFcst.getField(mVariable, iTimeStep))(Inearest,Jnearest,0);
         fcst += elevCorrection;
         float obs = (*iObs.getField(mVariable, iTimeStep))(oi, oj,0);
         float bias = Util::MV;
         if(Util::isValid(obs) && Util::isValid(fcst))
            bias = obs - fcst;

         // Get KF parameters
         KalmanParameters par = initialize(); // Initialize to empty
         if(iDbIn != NULL) {
            const Parameters& rawPar = iDbIn->getParameters(0, obsLoc); // Same parameters for all hours
            par = KalmanParameters(rawPar);
         }

         // Update forecasts
         KalmanParameters parNew = update(bias, iTimeStep, par);

         // Store parameters and bias
         if(iDbOut != NULL) {
            iDbOut->setParameters(parNew.toParameters(), 0, obsLoc);
         }
         if(iBiasFile != NULL) {
            std::vector<float> values(1,0);
            // Figure out which hours to write which parameters to
            std::vector<double> times = iFcst.getTimes();
            double refTime = iFcst.getReferenceTime();
            for(int h = 0; h < times.size(); h++) {
               int sec = times[h] - refTime;
               int hour = sec / 3600 % 24;
               hour = h;
               int index = (int) round((float) hour*mDim/24) % mDim;
               assert(parNew.x.size() > index);
               values[0] = parNew.x[index];
               iBiasFile->setParameters(Parameters(values), h, obsLoc);
            }
         }
      }
   }
   if(iDbOut != NULL) {
      iDbOut->write();
   }
   if(iBiasFile != NULL) {
      iBiasFile->write();
   }
   return true;
}
const std::string &OrientationEntry::initialValue() const {
	return values()[0];
}
    void YoYInflationCapFloorEngine::calculate() const {

        // copy black version then adapt to others

        Real value = 0.0;
        Size optionlets = arguments_.startDates.size();
        std::vector<Real> values(optionlets, 0.0);
        std::vector<Real> stdDevs(optionlets, 0.0);
        std::vector<Real> forwards(optionlets, 0.0);
        YoYInflationCapFloor::Type type = arguments_.type;

        Handle<YoYInflationTermStructure> yoyTS
        = index()->yoyInflationTermStructure();
        Handle<YieldTermStructure> nominalTS =
            !nominalTermStructure_.empty() ?
            nominalTermStructure_ :
            yoyTS->nominalTermStructure();
        Date settlement = nominalTS->referenceDate();

        for (Size i=0; i<optionlets; ++i) {
            Date paymentDate = arguments_.payDates[i];
            if (paymentDate > settlement) { // discard expired caplets
                DiscountFactor d = arguments_.nominals[i] *
                    arguments_.gearings[i] *
                    nominalTS->discount(paymentDate) *
                arguments_.accrualTimes[i];

                // We explicitly have the index and assume that
                // the fixing is natural, i.e. no convexity adjustment.
                // If that was required then we would also need
                // nominal vols in the pricing engine, i.e. a different engine.
                // This also means that we do not need the coupon to have
                // a pricing engine to return the swaplet rate and then
                // the adjusted fixing in the instrument.
                forwards[i] = yoyTS->yoyRate(arguments_.fixingDates[i],Period(0,Days));
                Rate forward = forwards[i];

                Date fixingDate = arguments_.fixingDates[i];
                Time sqrtTime = 0.0;
                if (fixingDate > volatility_->baseDate()){
                    sqrtTime = std::sqrt(
                        volatility_->timeFromBase(fixingDate));
                }

                if (type == YoYInflationCapFloor::Cap || type == YoYInflationCapFloor::Collar) {
                    Rate strike = arguments_.capRates[i];
                    if (sqrtTime>0.0) {
                        stdDevs[i] = std::sqrt(
                            volatility_->totalVariance(fixingDate, strike, Period(0,Days)));

                    }

                    // stdDev=0 for already-fixed dates, so everything is on the forward
                    values[i] = optionletImpl(Option::Call, strike,
                                              forward, stdDevs[i], d);
                }
                if (type == YoYInflationCapFloor::Floor || type == YoYInflationCapFloor::Collar) {
                    Rate strike = arguments_.floorRates[i];
                    if (sqrtTime>0.0) {
                        stdDevs[i] = std::sqrt(
                            volatility_->totalVariance(fixingDate, strike, Period(0,Days)));
                    }
                    Real floorlet = optionletImpl(Option::Put, strike,
                                                  forward, stdDevs[i], d);
                    if (type == YoYInflationCapFloor::Floor) {
                        values[i] = floorlet;
                    } else {
                        // a collar is long a cap and short a floor
                        values[i] -= floorlet;
                    }

                }
                value += values[i];
            }
        }
        results_.value = value;

        results_.additionalResults["optionletsPrice"] = values;
        results_.additionalResults["optionletsAtmForward"] = forwards;
        if (type != YoYInflationCapFloor::Collar)
            results_.additionalResults["optionletsStdDev"] = stdDevs;
    }
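The collar branch above prices the cap leg first and then subtracts the floorlet from values[i], which matches the usual per-optionlet decomposition

\[ \text{Collar}(K_{\text{floor}}, K_{\text{cap}}) = \text{Cap}(K_{\text{cap}}) - \text{Floor}(K_{\text{floor}}), \]

i.e. a collar is long a cap and short a floor, applied before the optionlet values are summed into the total.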
Example no. 8
 int flipLights(int n, int m) {
     if (n == 0 || m == 0) return 1;
     vector<State> states;
     for (int option = 0; option < 8; ++option) {
         vector<bool> values(n, true);
         for (int i = 1; i <= n; ++i) {
             if (option & 1) { // even
                 if ((i & 1) == 0) {
                     values[i-1] = !values[i-1];
                 }
             }
             if (option & 2) { // odd
                 if ((i & 1) == 1) {
                     values[i-1] = !values[i-1];
                 }
             }
             if (option & 4) { // 3 * K + 1
                 if (i % 3 == 1) {
                     values[i-1] = !values[i-1];
                 }
             }
         }
         bool found = false;
         for (int i = 0; i < states.size(); ++i) {
             if (values == states[i].result) {
                 states[i].options.insert(option);
                 found = true;
                 break;
             }
         }
         if (!found) {
             State state;
             state.options.insert(option);
             state.result = values;
             states.push_back(state);
         }
     }
     if (m >= 4) {
         return states.size();
     }
     vector<bool> filled(states.size(), false);
     for (int d_value = 0; d_value <= m; ++d_value) {
         int bc_value = m - d_value;
         int d_option = (d_value % 2 == 0) ? 0 : 4;
         vector<int> bc_options;
         if (bc_value == 0) {
             bc_options.push_back(0);
         } else if (bc_value == 1) {
             bc_options.push_back(1);
             bc_options.push_back(2);
             bc_options.push_back(3);
         } else {
             bc_options.push_back(0);
             bc_options.push_back(1);
             bc_options.push_back(2);
             bc_options.push_back(3);
         }
         for (int i = 0; i < bc_options.size(); ++i) {
             int option = d_option | bc_options[i];
             for (int j = 0; j < states.size(); ++j) {
                 if (states[j].options.find(option) != states[j].options.end()) {
                     filled[j] = true;
                     break;
                 }
             }
         }
     }
     int res = 0;
     for (int i = 0; i < filled.size(); ++i) {
         if (filled[i]) res++;        
     }
     return res;
 }
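flipLights() uses a State type that is not shown in this snippet. A minimal definition inferred from how it is used above (options collects the button-press bitmasks that yield a pattern, result stores the pattern itself):

#include <set>
#include <vector>

// Hypothetical definition, inferred from usage in flipLights().
struct State {
    std::set<int> options;     // bitmasks (bit 0: even, bit 1: odd, bit 2: 3k+1) producing this pattern
    std::vector<bool> result;  // on/off pattern of the n bulbs
};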
void DoAttrValueTest()
{
  nsCOMPtr<nsIContentUtils> utils =
   do_GetService("@mozilla.org/content/contentutils;1");

  if (!utils)
    fail("No nsIContentUtils");

  int idx = -1;
  bool didFail = false;
  while (Data[++idx].margins) {
    nsAutoString str;
    str.AssignLiteral(Data[idx].margins);
    nsIntMargin values(99,99,99,99);
    bool result = utils->ParseIntMarginValue(str, values);

    // if the parse fails
    if (!result) {
      if (Data[idx].shouldfail)
        continue;
      fail(Data[idx].margins);
      didFail = true;
      printf("*1\n");
      continue;
    }

    if (Data[idx].shouldfail) {
      if (Data[idx].top == values.top &&
          Data[idx].right == values.right &&
          Data[idx].bottom == values.bottom &&
          Data[idx].left == values.left) {
        // not likely
        fail(Data[idx].margins);
        didFail = true;
        printf("*2\n");
        continue;
      }
      // good failure, parse failed and that's what we expected.
      continue;
    }
#if 0
    printf("%d==%d %d==%d %d==%d %d==%d\n",
      Data[idx].top, values.top,
      Data[idx].right, values.right,
      Data[idx].bottom, values.bottom,
      Data[idx].left, values.left);
#endif
    if (Data[idx].top == values.top &&
        Data[idx].right == values.right &&
        Data[idx].bottom == values.bottom &&
        Data[idx].left == values.left) {
      // good parse results
      continue;
    }
    else {
      fail(Data[idx].margins);
      didFail = true;
      printf("*3\n");
      continue;
    }
  }

  if (!didFail)
    passed("nsAttrValue margin parsing tests passed.");
}
Example no. 10
void OrthotropicSecantCoefficientOfThermalExpansion::writeXMLData(QXmlStreamWriter& stream)
{
    NQLog("OrthotropicSecantCoefficientOfThermalExpansion", NQLog::Spam) << "  XML write data for property " << getName()
                                   << " (" << getIdString().toStdString() << ")";

    NQLog("IsotropicSecantCoefficientOfThermalExpansion", NQLog::Spam) << "  XML write data for property " << getName()
                                   << " (" << getIdString().toStdString() << ")";

    stream.writeStartElement("PropertyData");
    stream.writeAttribute("property", getIdString());

    stream.writeStartElement("Data");
    stream.writeAttribute("format", "string");
    stream.writeCharacters("-");
    stream.writeEndElement(); // Data

    if (Definition_!=UnknownDefinition) {
        stream.writeStartElement("Qualifier");
        stream.writeAttribute("name", "Definition");
        stream.writeCharacters(getDefinitionAsString());
        stream.writeEndElement(); // Qualifier
    }

    if (Behavior_!=UnknownBehavior) {
        stream.writeStartElement("Qualifier");
        stream.writeAttribute("name", "Behavior");
        stream.writeCharacters(getBehaviorAsString());
        stream.writeEndElement(); // Qualifier
    }

    Parameter * parameter;
    parameter = getParameter("Coefficient of Thermal Expansion X direction");
    writeXMLparameter(stream, parameter);
    parameter = getParameter("Coefficient of Thermal Expansion Y direction");
    writeXMLparameter(stream, parameter);
    parameter = getParameter("Coefficient of Thermal Expansion Z direction");
    writeXMLparameter(stream, parameter);

    stream.writeStartElement("ParameterValue");
    stream.writeAttribute("parameter", "pa0");
    stream.writeAttribute("format", "float");

    QString values("");
    for (std::vector<ParameterValue>::const_iterator it=parameter->getValues().begin();
         it!=parameter->getValues().end();
         ++it) {

        const ParameterValue& pv = *it;
        if (it!=parameter->getValues().begin()) values += ",";
        if (pv.isTemperatureValid()) {
            values += QString::number(pv.getTemperature(), 'e', 6);
        } else {
            values += undefindedIdentifyerAsString();
        }
    }
    if (parameter->getValues().size()==0) values = undefindedIdentifyerAsString();
    stream.writeTextElement("Data", values);

    stream.writeStartElement("Qualifier");
    stream.writeAttribute("name", "Variable Type");
    stream.writeCharacters("Independent");
    stream.writeEndElement(); // Qualifier

    stream.writeEndElement(); // ParameterValue

    stream.writeEndElement(); // PropertyData

    referenceTemperatureProperty_->writeXMLData(stream);
}
String CSSMutableStyleDeclaration::getLayeredShorthandValue(const int* properties, unsigned number) const
{
    String res;

    // Begin by collecting the properties into an array.
    Vector< RefPtr<CSSValue> > values(number);
    size_t numLayers = 0;
    
    for (size_t i = 0; i < number; ++i) {
        values[i] = getPropertyCSSValue(properties[i]);
        if (values[i]) {
            if (values[i]->isValueList()) {
                CSSValueList* valueList = static_cast<CSSValueList*>(values[i].get());
                numLayers = max(valueList->length(), numLayers);
            } else
                numLayers = max<size_t>(1U, numLayers);
        }
    }
    
    // Now stitch the properties together.  Implicit initial values are flagged as such and
    // can safely be omitted.
    for (size_t i = 0; i < numLayers; i++) {
        String layerRes;
        bool useRepeatXShorthand = false;
        bool useRepeatYShorthand = false;
        bool useSingleWordShorthand = false;
        for (size_t j = 0; j < number; j++) {
            RefPtr<CSSValue> value;
            if (values[j]) {
                if (values[j]->isValueList())
                    value = static_cast<CSSValueList*>(values[j].get())->item(i);
                else {
                    value = values[j];
                    
                    // Color only belongs in the last layer.
                    if (properties[j] == CSSPropertyBackgroundColor) {
                        if (i != numLayers - 1)
                            value = 0;
                    } else if (i != 0) // Other singletons only belong in the first layer.
                        value = 0;
                }
            }

            // We need to report background-repeat as it was written in the CSS. If the property is implicit,
            // then it was written with only one value. Here we figure out which value that was so we can
            // report back correctly. 
            if (properties[j] == CSSPropertyBackgroundRepeatX && isPropertyImplicit(properties[j])) {

                // BUG 49055: make sure the value was not reset in the layer check just above.
                if (j < number - 1 && properties[j + 1] == CSSPropertyBackgroundRepeatY && value) {
                    RefPtr<CSSValue> yValue;
                    RefPtr<CSSValue> nextValue = values[j + 1];
                    if (nextValue->isValueList())
                        yValue = static_cast<CSSValueList*>(nextValue.get())->itemWithoutBoundsCheck(i);
                    else
                        yValue = nextValue;
                        
                    int xId = static_cast<CSSPrimitiveValue*>(value.get())->getIdent();
                    int yId = static_cast<CSSPrimitiveValue*>(yValue.get())->getIdent();
                    if (xId != yId) {
                        if (xId == CSSValueRepeat && yId == CSSValueNoRepeat) {
                            useRepeatXShorthand = true;
                            ++j;
                        } else if (xId == CSSValueNoRepeat && yId == CSSValueRepeat) {
                            useRepeatYShorthand = true;
                            continue;
                        }
                    } else {
                        useSingleWordShorthand = true;
                        ++j;
                    }
                }
            }
            
            if (value && !value->isImplicitInitialValue()) {
                if (!layerRes.isNull())
                    layerRes += " ";
                if (useRepeatXShorthand) {
                    useRepeatXShorthand = false;
                    layerRes += getValueName(CSSValueRepeatX);
                } else if (useRepeatYShorthand) {
                    useRepeatYShorthand = false;
                    layerRes += getValueName(CSSValueRepeatY);
                } else if (useSingleWordShorthand) {
                    useSingleWordShorthand = false;
                    layerRes += value->cssText();
                } else
                    layerRes += value->cssText();
            }
        }
        
        if (!layerRes.isNull()) {
            if (!res.isNull())
                res += ", ";
            res += layerRes;
        }
    }

    return res;
}
Example no. 12
rk_result_t Robot::dampedLeastSquaresIK_chain(const vector<size_t> &jointIndices, VectorXd &jointValues, const Isometry3d &target, const Isometry3d &finalTF)
{


    vector<Linkage::Joint*> joints;
    joints.resize(jointIndices.size());
    // FIXME: Add in safety checks
    for(int i=0; i<joints.size(); i++)
        joints[i] = joints_[jointIndices[i]];

    // ~~ Declarations ~~
    MatrixXd J;
    MatrixXd Jinv;
    Isometry3d pose;
    AngleAxisd aagoal(target.rotation());
    AngleAxisd aastate;
    Vector3d Terr;
    Vector3d Rerr;
    Vector6d err;
    VectorXd delta(jointValues.size());
    VectorXd f(jointValues.size());


    tolerance = 0.001;
    maxIterations = 50; // TODO: Put this in the constructor so the user can set it arbitrarily
    damp = 0.05;

    values(jointIndices, jointValues);

    pose = joint(jointIndices.back()).respectToRobot()*finalTF;
    aastate = pose.rotation();

    Terr = target.translation()-pose.translation();
    Rerr = aagoal.angle()*aagoal.axis()-aastate.angle()*aastate.axis();
    err << Terr, Rerr;

    size_t iterations = 0;
    do {

        jacobian(J, joints, joints.back()->respectToRobot().translation()+finalTF.translation(), this);

        f = (J*J.transpose() + damp*damp*Matrix6d::Identity()).colPivHouseholderQr().solve(err);
        delta = J.transpose()*f;

        jointValues += delta;

        values(jointIndices, jointValues);

        pose = joint(jointIndices.back()).respectToRobot()*finalTF;
        aastate = pose.rotation();

        Terr = target.translation()-pose.translation();
        Rerr = aagoal.angle()*aagoal.axis()-aastate.angle()*aastate.axis();
        err << Terr, Rerr;

        iterations++;


    } while(err.norm() > tolerance && iterations < maxIterations);

}
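The step computed inside the loop above is the standard damped least squares (Levenberg-Marquardt style) IK update: the colPivHouseholderQr() solve applies \((J J^{\top} + \lambda^{2} I)^{-1}\) to the error, so

\[ \Delta\theta = J^{\top}\left(J J^{\top} + \lambda^{2} I\right)^{-1} e, \]

with \(\lambda\) = damp = 0.05 and \(e\) the stacked translational and rotational error.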
Example no. 13
rk_result_t Robot::jacobianTransposeIK_chain(const vector<size_t> &jointIndices, VectorXd &jointValues, const Isometry3d &target, const Isometry3d &finalTF)
{
    return RK_SOLVER_NOT_READY;
    // FIXME: Make this solver work


    vector<Linkage::Joint*> joints;
    joints.resize(jointIndices.size());
    // FIXME: Add in safety checks
    for(int i=0; i<joints.size(); i++)
        joints[i] = joints_[jointIndices[i]];

    // ~~ Declarations ~~
    MatrixXd J;
    MatrixXd Jinv;
    Isometry3d pose;
    AngleAxisd aagoal;
    AngleAxisd aastate;
    Vector6d state;
    Vector6d err;
    VectorXd delta(jointValues.size());
    Vector6d gamma;
    double alpha;

    aagoal = target.rotation();

    double Tscale = 3; // TODO: Put these as a class member in the constructor
    double Rscale = 0;

    tolerance = 1*M_PI/180; // TODO: Put this in the constructor so the user can set it arbitrarily
    maxIterations = 100; // TODO: Put this in the constructor so the user can set it arbitrarily

    size_t iterations = 0;
    do {
        values(jointIndices, jointValues);

        jacobian(J, joints, joints.back()->respectToRobot().translation()+finalTF.translation(), this);

        pose = joint(jointIndices.back()).respectToRobot()*finalTF;
        aastate = pose.rotation();
        state << pose.translation(), aastate.axis()*aastate.angle();

        err << (target.translation()-pose.translation()).normalized()*Tscale,
               (aagoal.angle()*aagoal.axis()-aastate.angle()*aastate.axis()).normalized()*Rscale;

        gamma = J*J.transpose()*err;
        alpha = err.dot(gamma)/gamma.norm();

        delta = alpha*J.transpose()*err;

        jointValues += delta;
        iterations++;

        std::cout << iterations << " | Norm:" << delta.norm()
//                  << "\tdelta: " << delta.transpose() << "\tJoints:" << jointValues.transpose() << std::endl;
                  << " | " << (target.translation() - pose.translation()).norm()
                  << "\tErr: " << (target.translation()-pose.translation()).transpose() << std::endl;

    } while(err.norm() > tolerance && iterations < maxIterations);

}
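For reference, the Jacobian transpose method normally chooses the step size as

\[ \alpha = \frac{\langle e,\; J J^{\top} e\rangle}{\lVert J J^{\top} e\rVert^{2}}, \qquad \Delta\theta = \alpha\, J^{\top} e, \]

i.e. the denominator is the squared norm of gamma; the snippet divides by gamma.norm() instead, which may be related to the FIXME at the top of the function.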
Example no. 14
rk_result_t Robot::pseudoinverseIK_chain(const vector<size_t> &jointIndices, VectorXd &jointValues,
                                  const Isometry3d &target, const Isometry3d &finalTF)
{
    return RK_SOLVER_NOT_READY;
    // FIXME: Make this solver work


    vector<Linkage::Joint*> joints;
    joints.resize(jointIndices.size());
    // FIXME: Add in safety checks
    for(int i=0; i<joints.size(); i++)
        joints[i] = joints_[jointIndices[i]];

    // ~~ Declarations ~~
    MatrixXd J;
    MatrixXd Jinv;
    Isometry3d pose;
    AngleAxisd aagoal;
    AngleAxisd aastate;
    Vector6d goal;
    Vector6d state;
    Vector6d err;
    VectorXd delta(jointValues.size());

    MatrixXd Jsub;
    aagoal = target.rotation();
    goal << target.translation(), aagoal.axis()*aagoal.angle();

    tolerance = 1*M_PI/180; // TODO: Put this in the constructor so the user can set it arbitrarily
    maxIterations = 100; // TODO: Put this in the constructor so the user can set it arbitrarily
    errorClamp = 0.25; // TODO: Put this in the constructor
    deltaClamp = M_PI/4; // TODO: Put this in the constructor

    size_t iterations = 0;
    do {

        values(jointIndices, jointValues);

        jacobian(J, joints, joints.back()->respectToRobot().translation()+finalTF.translation(), this);
        Jsub = J.block(0,0,3,jointValues.size());

        pinv(Jsub, Jinv);

        pose = joint(jointIndices.back()).respectToRobot()*finalTF;
        aastate = pose.rotation();
        state << pose.translation(), aastate.axis()*aastate.angle();

        err = goal-state;
        for(int i=3; i<6; i++)
            err[i] *= 0;
        err.normalize();

        Vector3d e = (target.translation() - pose.translation()).normalized()*0.005;

//        delta = Jinv*err*0.1;
//        clampMag(delta, deltaClamp);
        VectorXd d = Jinv*e;

//        jointValues += delta;
        jointValues += d;
        std::cout << iterations << " | Norm:" << delta.norm()
//                  << "\tdelta: " << delta.transpose() << "\tJoints:" << jointValues.transpose() << std::endl;
                  << " | " << (target.translation() - pose.translation()).norm()
                  << "\tErr: " << (goal-state).transpose() << std::endl;


        iterations++;
    } while(delta.norm() > tolerance && iterations < maxIterations);

}
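In the pseudoinverse variant above only the translational rows of the Jacobian are used (Jsub is the top 3-by-n block), so the step actually applied is

\[ \Delta\theta = J_{\text{pos}}^{+}\, e_{\text{pos}}, \]

where \(J_{\text{pos}}^{+}\) is the Moore-Penrose pseudoinverse computed by pinv() and \(e_{\text{pos}}\) the normalized, scaled translational error.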
Example no. 15
    double pfilter(Model & sim_model, Parameter & model_params, MCMCoptions & options, Particle &particles, Trajectory & output_traj, TimeSeriesData &epi_data, TreeData &tree_data, MultiTreeData &multitree_data) {
        int thread_max = omp_get_max_threads();
        gsl_rng** rngs = new gsl_rng*[thread_max];
        for (int thread = 0; thread < thread_max; thread++) {
            rngs[thread] = gsl_rng_alloc(gsl_rng_mt19937);
            gsl_rng_set(rngs[thread], omp_get_thread_num() + thread);
        }
        
        double loglik = 0.0;
        int num_groups = options.num_groups;
        int num_particles = options.particles;
        int init_seed = options.seed;
        int total_dt = options.total_dt;
        double sim_dt = options.sim_dt;
        int total_steps = ceil((double)total_dt/(double)options.pfilter_every);
        int add_dt = 0;
        double ESS_threshold = options.pfilter_threshold*(double)num_particles;
        Likelihood likelihood_calc;
        //        std::vector <Parameter> values;// (options.num_threads, model_params);
        //        for (int i=0; i!=options.num_threads; ++i) values.push_back(model_params);
        //        for (int i=0; i!=model_params.get_total_params(); ++i) values.push_back(model_params.get(i));
        std::vector <std::vector<double> > values(options.num_threads, std::vector<double>(model_params.get_total_params(), 0.0));
        for (int i=0; i!=options.num_threads; ++i) {
            for (int j=0; j!=model_params.get_total_params(); ++j) {
                values[i][j] = model_params.get(j);
            }
        }
        //        printf("Size of values = %d\n",values.size());
        double reporting_rate = 1.0;
        if (model_params.param_exists("reporting")) {
            reporting_rate = model_params.get("reporting");
        }
        std::vector <std::string> param_names = model_params.get_names_vector();
        std::vector <std::vector<std::string> > param_names_threads (options.num_threads);
        if (model_params.param_exists("time_before_data")) {
            add_dt = model_params.get("time_before_data");
        }
        if (options.save_traj) {
            if (add_dt > 0) {
                particles.start_particle_tracing(add_dt+total_dt, num_groups);
            }
            else if (add_dt < 0) {
                particles.start_particle_tracing(add_dt+total_dt, num_groups);
                total_steps = ceil((double)(total_dt+add_dt)/(double)options.pfilter_every);
            }
            else {
                particles.start_particle_tracing(total_dt, num_groups);
            }
        }
        std::vector <Model> models;
        for (int i=0; i<options.num_threads; ++i) {
            models.push_back(sim_model);
        }
        std::vector <int> add_dt_threads (options.num_threads, add_dt);
        std::vector <int> start_dt_threads (options.num_threads, 0);
        std::vector <int> end_dt_threads (options.num_threads, add_dt);
        std::vector <double> dt_threads (options.num_threads, sim_dt);
        std::vector <int> total_dt_threads(options.num_threads, total_dt);
        std::vector <double> reporting_rate_threads(options.num_threads, reporting_rate);
        std::vector <int> num_groups_threads(options.num_threads, num_groups);
        // Simulate model and calculate likelihood assuming no observed data
        if (model_params.param_exists("time_before_data")) {
            if (add_dt > 0) {
                omp_set_num_threads(options.num_threads);
                //                std::vector <Trajectory *> curr_trajs;
                //                for (int i=0; i!=num_particles; ++i) {
                //                    curr_trajs.push_back(particles.get_traj(i));
                //                }
#pragma omp parallel for shared(particles, values) schedule(static,1)
                for (int tn = 0; tn < thread_max; tn++) {
                    for (int i = tn; i < num_particles; i += thread_max) {
                        // Adjust length of trajectory
                        particles.get_traj(i)->resize(add_dt, num_groups);
                        models[tn].simulate(values[tn], param_names_threads[tn], particles.get_traj(i), 0, add_dt_threads[tn], dt_threads[tn], total_dt_threads[tn], rngs[tn]);
                        if (options.which_likelihood < 2) {
                            double w = likelihood_calc.binomial_lik(reporting_rate_threads[tn], particles.get_traj(i)->get_total_traj(), add_dt_threads[tn] + total_dt_threads[tn], 0, add_dt_threads[tn], num_groups_threads[tn], false);
                            particles.set_weight(w, i, false);
                        }
                        if (options.save_traj) {
                            particles.save_traj_to_matrix(i, 0, add_dt);
                            particles.save_ancestry(i, 0, add_dt);
                        }
                    }
                }
            }
        }
        init_seed += num_particles;
        int t=0;
        int start_dt;
        int end_dt;
        for (t = 0; t != total_steps; ++t) {
            //            std::vector<double> we(options.particles, 0.0), wg(options.particles, 0.0);
            start_dt = t*options.pfilter_every;
            end_dt = std::min(total_dt, (t + 1)*options.pfilter_every);
            std::fill(start_dt_threads.begin(), start_dt_threads.end(), start_dt);
            std::fill(end_dt_threads.begin(), end_dt_threads.end(), end_dt);
            omp_set_num_threads(options.num_threads);
#pragma omp parallel for shared (particles, values) schedule(static,1)
            for (int tn = 0; tn < thread_max; tn++) {
                for (int i = tn; i < num_particles; i+=thread_max) {
                    // Adjust length of trajectory
                    //                    if (tn==0) std::cout << i << ' ' << std::endl;
                    particles.get_traj(i)->resize(end_dt - start_dt, options.num_groups);
                    models[tn].simulate(values[tn], param_names_threads[tn], particles.get_traj(i), start_dt_threads[tn], end_dt_threads[tn], dt_threads[tn], total_dt_threads[tn], rngs[tn]);
                    double w = 1.0;
                    double temp = 0.0;
                    if (options.which_likelihood < 2) {
                        double A = particles.get_traj(i)->get_total_traj();
                        temp = likelihood_calc.binomial_lik(reporting_rate_threads[tn], A, epi_data.get_data_ptr(0), add_dt_threads[tn] + total_dt_threads[tn], start_dt_threads[tn], end_dt_threads[tn], add_dt_threads[tn], num_groups_threads[tn], false);
                        w *= temp;
                        //                        we[i] = log(temp);
                    }
                    if (options.which_likelihood != 1) {
                        temp = likelihood_calc.coalescent_lik(particles.get_traj(i)->get_traj_ptr(0, 0), particles.get_traj(i)->get_traj_ptr(1, 0),
                                                              tree_data.get_binomial_ptr(0), tree_data.get_interval_ptr(0), tree_data.get_ends_ptr(0),
                                                              start_dt_threads[tn], end_dt_threads[tn], add_dt_threads[tn], false);
                        w *= temp;
                        //                        wg[i] = log(temp);
                    }
                    particles.set_weight(w, i, true);
                    if (options.save_traj) {
                        particles.save_traj_to_matrix(i, start_dt_threads[tn] + add_dt_threads[tn], end_dt_threads[tn] + add_dt_threads[tn]);
                        particles.save_ancestry(i, start_dt_threads[tn] + add_dt_threads[tn], end_dt_threads[tn] + add_dt_threads[tn]);
                    }
                }
            }
            //            std::cout << "Epi Weight: " << std::accumulate(we.begin(), we.end(), 0.0) << " Gen Weight: " << std::accumulate(wg.begin(), wg.end(), 0.0) << " Total: " << particles.get_total_weight() << std::endl;
            double curr_ESS = particles.get_ESS();
            if (curr_ESS < ESS_threshold) {
                double total_weight = particles.get_total_weight();
                if (total_weight == 0.0) {
                    loglik += -0.1*std::numeric_limits<double>::max();
                    //                    std::cout << std::accumulate(epi_data.get_data_ptr(0)+start_dt, epi_data.get_data_ptr(0)+end_dt, 0.0) << " : " << particles.get_traj(0)->get_traj(0) << std::endl;
                    std::cout << "stop time: " << end_dt << std::endl;
                    break;
                } else {
                    loglik += log(total_weight) - log(num_particles);
                }
                particles.resample(options.rng[0]);
            }
            else {
                particles.reset_parents();
            }
        }
        if (options.save_traj) {
            output_traj.resize((total_dt+add_dt), num_groups);
            //if (loglik > -0.1*std::numeric_limits<double>::max()) {
            particles.retrace_traj(output_traj, options.rng[0]);
            //}
        }
        for (int i=0; i!=num_particles; ++i) {
            particles.get_traj(i)->reset();
        }
        std::vector < std::vector<double> >().swap(values);
        
        for (int thread = 0; thread < thread_max; thread++) {
            gsl_rng_free(rngs[thread]);
        }
        delete[] rngs;
        
        return (loglik);
    }
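Resampling in the particle filter above is triggered when the effective sample size falls below options.pfilter_threshold times the number of particles. Assuming the usual definition in terms of the particle weights \(w_i\),

\[ \mathrm{ESS} = \frac{\left(\sum_{i} w_{i}\right)^{2}}{\sum_{i} w_{i}^{2}}, \]

which equals the number of particles when all weights are equal and approaches 1 as the weights degenerate.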
    virtual bool interpretCommand(RiaSocketServer* server, const QList<QByteArray>&  args, QDataStream& socketStream)
    {
        int caseId                = args[1].toInt();
        int gridIdx               = args[2].toInt();
        QString propertyName      = args[3];
        QString porosityModelName = args[4];
        
        RimCase*rimCase = server->findReservoir(caseId);
        if (rimCase == NULL)
        {
            server->errorMessageDialog()->showMessage(RiaSocketServer::tr("ResInsight SocketServer: \n") + RiaSocketServer::tr("Could not find the case with ID: \"%1\"").arg(caseId));

            // No data available
            socketStream << (quint64)0 << (quint64)0 <<  (quint64)0  << (quint64)0 ;
            return true;
        }

        RifReaderInterface::PorosityModelResultType porosityModelEnum = RifReaderInterface::MATRIX_RESULTS;
        if (porosityModelName == "Fracture")
        {
            porosityModelEnum = RifReaderInterface::FRACTURE_RESULTS;
        }

        size_t scalarResultIndex = cvf::UNDEFINED_SIZE_T;

        if (gridIdx < 0  || rimCase->reservoirData()->gridCount() <= (size_t)gridIdx)
        {
            server->errorMessageDialog()->showMessage("ResInsight SocketServer: riGetGridProperty : \n"
                                                      "The gridIndex \"" + QString::number(gridIdx) + "\" does not point to an existing grid." );
        }
        else
        {
            // Find the requested data
            if (rimCase && rimCase->results(porosityModelEnum))
            {
                scalarResultIndex = rimCase->results(porosityModelEnum)->findOrLoadScalarResult(propertyName);
            }
        }

        if (scalarResultIndex == cvf::UNDEFINED_SIZE_T)
        {
            server->errorMessageDialog()->showMessage(RiaSocketServer::tr("ResInsight SocketServer: \n") + RiaSocketServer::tr("Could not find the %1 model property named: \"%2\"").arg(porosityModelName).arg(propertyName));

            // No data available
            socketStream << (quint64)0 << (quint64)0 <<  (quint64)0  << (quint64)0 ;
            return true;
        }


        // Create a list of all the requested time steps

        std::vector<size_t> requestedTimesteps;

        if (args.size() <= 5)
        {
            // Select all
            for (size_t tsIdx = 0; tsIdx < rimCase->results(porosityModelEnum)->cellResults()->timeStepCount(scalarResultIndex); ++tsIdx)
            {
                requestedTimesteps.push_back(tsIdx);
            }
        }
        else
        {
            bool timeStepReadError = false;
            for (int argIdx = 5; argIdx < args.size(); ++argIdx)
            {
                bool conversionOk = false;
                int tsIdx = args[argIdx].toInt(&conversionOk);

                if (conversionOk)
                {
                    requestedTimesteps.push_back(tsIdx);
                }
                else
                {
                    timeStepReadError = true;
                }
            }

            if (timeStepReadError)
            {
                server->errorMessageDialog()->showMessage(RiaSocketServer::tr("ResInsight SocketServer: riGetGridProperty : \n")
                                                          + RiaSocketServer::tr("An error occured while interpreting the requested timesteps."));
            }

        }


        RigGridBase* rigGrid = rimCase->reservoirData()->grid(gridIdx);

        quint64 cellCountI = (quint64)rigGrid->cellCountI();
        quint64 cellCountJ = (quint64)rigGrid->cellCountJ();
        quint64 cellCountK = (quint64)rigGrid->cellCountK();

        socketStream << cellCountI;
        socketStream << cellCountJ;
        socketStream << cellCountK;

        // Write time step count

        quint64 timestepCount = (quint64)requestedTimesteps.size();
        socketStream << timestepCount;

        size_t valueCount = cellCountI * cellCountJ * cellCountK * timestepCount;
        size_t doubleValueCount = valueCount * sizeof(double);
        std::vector<double> values(valueCount);
        size_t valueIdx = 0;
        
        for (size_t tsIdx = 0; tsIdx < timestepCount; tsIdx++)
        {
            cvf::ref<cvf::StructGridScalarDataAccess> cellCenterDataAccessObject = rimCase->reservoirData()->dataAccessObject(rigGrid, porosityModelEnum, requestedTimesteps[tsIdx], scalarResultIndex);
            if (cellCenterDataAccessObject.isNull())
            {
                continue;
            }

            for (size_t cellIdx = 0; cellIdx < rigGrid->cellCount(); cellIdx++)
            {
                double cellValue = cellCenterDataAccessObject->cellScalar(cellIdx);
                if (cellValue == HUGE_VAL)
                {
                    cellValue = 0.0;
                }
                values[valueIdx++] = cellValue;
            }
        }

        server->currentClient()->write((const char *)values.data(), doubleValueCount);

        return true;
    }
  void Zoltan2Interface<Scalar, LocalOrdinal, GlobalOrdinal, Node>::Build(Level& level) const {
    FactoryMonitor m(*this, "Build", level);

    RCP<Matrix>      A        = Get< RCP<Matrix> >     (level, "A");
    RCP<const Map>   rowMap   = A->getRowMap();

    RCP<Xpetra::MultiVector<double, LocalOrdinal, GlobalOrdinal, Node> > coords   = Get< RCP<Xpetra::MultiVector<double, LocalOrdinal, GlobalOrdinal, Node> > >(level, "Coordinates");
    RCP<const Map>   map      = coords->getMap();

    GO               numParts = level.Get<GO>("number of partitions");

    size_t dim       = coords->getNumVectors();
    LO     blkSize   = A->GetFixedBlockSize();

    // Check that the number of local coordinates is consistent with the #rows in A
    TEUCHOS_TEST_FOR_EXCEPTION(rowMap->getNodeNumElements()/blkSize != coords->getLocalLength(), Exceptions::Incompatible,
                               "Coordinate vector length (" + toString(coords->getLocalLength()) << " is incompatible with number of block rows in A ("
                               + toString(rowMap->getNodeNumElements()/blkSize) + "The vector length should be the same as the number of mesh points.");
#ifdef HAVE_MUELU_DEBUG
    GO indexBase = rowMap->getIndexBase();
    GetOStream(Runtime0) << "Checking consistence of row and coordinates maps" << std::endl;
    // Make sure that logical blocks in row map coincide with logical nodes in coordinates map
    ArrayView<const GO> rowElements    = rowMap->getNodeElementList();
    ArrayView<const GO> coordsElements = map   ->getNodeElementList();
    for (LO i = 0; i < Teuchos::as<LO>(map->getNodeNumElements()); i++)
      TEUCHOS_TEST_FOR_EXCEPTION((coordsElements[i]-indexBase)*blkSize + indexBase != rowElements[i*blkSize],
                                 Exceptions::RuntimeError, "i = " << i << ", coords GID = " << coordsElements[i]
                                 << ", row GID = " << rowElements[i*blkSize] << ", blkSize = " << blkSize << std::endl);
#endif

    if (numParts == 1) {
      // Single processor, decomposition is trivial: all zeros
      RCP<Xpetra::Vector<GO,LO,GO,NO> > decomposition = Xpetra::VectorFactory<GO, LO, GO, NO>::Build(rowMap, true);
      Set(level, "Partition", decomposition);
      return;
    }

    GO numElements = map->getNodeNumElements();
    std::vector<const double*> values(dim), weights(1);
    std::vector<int>       strides;

    for (size_t k = 0; k < dim; k++)
      values[k] = coords->getData(k).get();

    const ParameterList& pL = GetParameterList();
    int rowWeight = pL.get<int>("rowWeight");
    GetOStream(Runtime0) << "Using weights formula: nnz + " << rowWeight << std::endl;

    Array<double> weightsPerRow(numElements);
    for (LO i = 0; i < numElements; i++) {
      weightsPerRow[i] = 0.0;
      for (LO j = 0; j < blkSize; j++) {
        weightsPerRow[i] += A->getNumEntriesInLocalRow(i*blkSize+j);
        // Zoltan2 pqJagged produces as good an nnz partitioning as Zoltan RCB,
        // but Zoltan also balances the rows well, which sometimes does not
        // happen for Zoltan2. So here is an attempt to get a better row partitioning
        // without significantly degrading the nnz partitioning.
        // NOTE: there is no good heuristic here; the value was chosen almost arbitrarily.
        weightsPerRow[i] += rowWeight;
      }
    }
    weights[0] = weightsPerRow.getRawPtr();

    RCP<const ParameterList> providedList = pL.get<RCP<const ParameterList> >("ParameterList");
    ParameterList Zoltan2Params;
    if (providedList != Teuchos::null)
      Zoltan2Params = *providedList;

    // Merge default Zoltan2 parameters with the user-provided ones
    // If the default and user lists contain the same parameter name, the user value is always preferred
    for (ParameterList::ConstIterator param = defaultZoltan2Params->begin(); param != defaultZoltan2Params->end(); param++) {
      const std::string& pName = defaultZoltan2Params->name(param);
      if (!Zoltan2Params.isParameter(pName))
        Zoltan2Params.set(pName, defaultZoltan2Params->get<std::string>(pName));
    }
    Zoltan2Params.set("num_global_parts", Teuchos::as<int>(numParts));

    GetOStream(Runtime0) << "Zoltan2 parameters:" << std::endl << "----------" << std::endl << Zoltan2Params << "----------" << std::endl;

    const std::string& algo = Zoltan2Params.get<std::string>("algorithm");
    TEUCHOS_TEST_FOR_EXCEPTION(algo != "multijagged" &&
                               algo != "rcb",
                               Exceptions::RuntimeError, "Unknown partitioning algorithm: \"" << algo << "\"");

    typedef Zoltan2::BasicVectorAdapter<Zoltan2::BasicUserTypes<double,GO,LO,GO> > InputAdapterType;
    typedef Zoltan2::PartitioningProblem<InputAdapterType> ProblemType;

    InputAdapterType adapter(numElements, map->getNodeElementList().getRawPtr(), values, strides, weights, strides);

    RCP<const Teuchos::MpiComm<int> >            dupMpiComm = rcp_dynamic_cast<const Teuchos::MpiComm<int> >(rowMap->getComm()->duplicate());
    RCP<const Teuchos::OpaqueWrapper<MPI_Comm> > zoltanComm = dupMpiComm->getRawMpiComm();

    RCP<ProblemType> problem(new ProblemType(&adapter, &Zoltan2Params, (*zoltanComm)()));

    {
      SubFactoryMonitor m1(*this, "Zoltan2 " + toString(algo), level);
      problem->solve();
    }

    RCP<Xpetra::Vector<GO,LO,GO,NO> > decomposition = Xpetra::VectorFactory<GO,LO,GO,NO>::Build(rowMap, false);
    ArrayRCP<GO>                      decompEntries = decomposition->getDataNonConst(0);

    const typename InputAdapterType::part_t * parts = problem->getSolution().getPartListView();

    for (GO i = 0; i < numElements; i++) {
      int partNum = parts[i];

      for (LO j = 0; j < blkSize; j++)
        decompEntries[i*blkSize + j] = partNum;
    }

    Set(level, "Partition", decomposition);

  } //Build()
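The weight loop in Build() above assigns each coordinate point (block row of A) a weight equal to the nonzeros in its block rows plus rowWeight per row. A minimal standalone sketch of that formula, assuming a hypothetical nnzPerRow array standing in for A->getNumEntriesInLocalRow():

// Sketch only, not MueLu code: per-node weight w[i] = sum_j nnz(row i*blkSize+j) + blkSize*rowWeight.
#include <cstddef>
#include <vector>

std::vector<double> nodeWeights(const std::vector<int>& nnzPerRow, // hypothetical nnz per matrix row
                                int blkSize, int rowWeight)
{
    const std::size_t numNodes = nnzPerRow.size() / blkSize;
    std::vector<double> w(numNodes, 0.0);
    for (std::size_t i = 0; i < numNodes; ++i)
        for (int j = 0; j < blkSize; ++j)
            w[i] += nnzPerRow[i*blkSize + j] + rowWeight;   // nnz + rowWeight, as in Build()
    return w;
}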
int main(int argc, char ** argv)
{
  std::string inputFileName = "test_InterpolationSurrogate/queso_input.txt";
  const char * test_srcdir = std::getenv("srcdir");
  if (test_srcdir)
    inputFileName = test_srcdir + ('/' + inputFileName);

#ifdef QUESO_HAS_MPI
  MPI_Init(&argc, &argv);
  QUESO::FullEnvironment env(MPI_COMM_WORLD, inputFileName, "", NULL);
#else
  QUESO::FullEnvironment env(inputFileName, "", NULL);
#endif

  int return_flag = 0;

  QUESO::VectorSpace<QUESO::GslVector, QUESO::GslMatrix>
    paramSpace(env,"param_", 3, NULL);

  QUESO::GslVector paramMins(paramSpace.zeroVector());
  paramMins[0] = -1;
  paramMins[1] = -0.5;
  paramMins[2] = 1.1;

  QUESO::GslVector paramMaxs(paramSpace.zeroVector());
  paramMaxs[0] = 0.9;
  paramMaxs[1] = 3.14;
  paramMaxs[2] = 2.1;

  QUESO::BoxSubset<QUESO::GslVector, QUESO::GslMatrix>
    paramDomain("param_", paramSpace, paramMins, paramMaxs);

  std::vector<unsigned int> n_points(3);
  n_points[0] = 101;
  n_points[1] = 51;
  n_points[2] = 31;

  QUESO::InterpolationSurrogateData<QUESO::GslVector, QUESO::GslMatrix>
    data(paramDomain,n_points);

  std::vector<double> values(n_points[0]*n_points[1]*n_points[2]);

  double spacing_x = (paramMaxs[0] - paramMins[0])/(n_points[0]-1);
  double spacing_y = (paramMaxs[1] - paramMins[1])/(n_points[1]-1);
  double spacing_z = (paramMaxs[2] - paramMins[2])/(n_points[2]-1);

  for( unsigned int i = 0; i < n_points[0]; i++ )
    {
      for( unsigned int j = 0; j < n_points[1]; j++ )
        {
          for( unsigned int k = 0; k < n_points[2]; k++ )
            {
              unsigned int n = i + j*n_points[0] + k*n_points[0]*n_points[1];

              double x = paramMins[0] + i*spacing_x;
              double y = paramMins[1] + j*spacing_y;
              double z = paramMins[2] + k*spacing_z;

              values[n] = three_d_fn(x,y,z);
            }
        }
    }

  data.set_values( values );

  QUESO::LinearLagrangeInterpolationSurrogate<QUESO::GslVector,QUESO::GslMatrix>
    three_d_surrogate( data );

  QUESO::GslVector domainVector(paramSpace.zeroVector());
  domainVector[0] = -0.4;
  domainVector[1] = 3.0;
  domainVector[2] = 1.5;

  double test_val = three_d_surrogate.evaluate(domainVector);

  double exact_val = three_d_fn(domainVector[0],domainVector[1],domainVector[2]);

  double tol = 2.0*std::numeric_limits<double>::epsilon();

  double rel_error = (test_val - exact_val)/exact_val;

  if( std::fabs(rel_error) > tol )
    {
      std::cerr << "ERROR: Tolerance exceeded for 3D Lagrange interpolation test."
                << std::endl
                << " test_val  = " << test_val << std::endl
                << " exact_val = " << exact_val << std::endl
                << " rel_error = " << rel_error << std::endl
                << " tol       = " << tol << std::endl;

      return_flag = 1;
    }

#ifdef QUESO_HAS_MPI
  MPI_Finalize();
#endif
  return return_flag;
}
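The triple loop above fills the surrogate data using the row-major flattening n = i + j*Nx + k*Nx*Ny, with i varying fastest. A small self-contained sketch (hypothetical helper names) of that mapping and its inverse:

// Sketch of the index flattening used when filling `values` above.
#include <cassert>
#include <cstddef>

struct Index3 { std::size_t i, j, k; };

inline std::size_t flatten(Index3 p, std::size_t Nx, std::size_t Ny)
{
    return p.i + p.j*Nx + p.k*Nx*Ny;        // i fastest, then j, then k
}

inline Index3 unflatten(std::size_t n, std::size_t Nx, std::size_t Ny)
{
    return { n % Nx, (n / Nx) % Ny, n / (Nx*Ny) };
}

int main()
{
    const std::size_t Nx = 101, Ny = 51;    // n_points[0], n_points[1] from the test above
    Index3 p = {10, 20, 5};
    Index3 q = unflatten(flatten(p, Nx, Ny), Nx, Ny);
    assert(q.i == p.i && q.j == p.j && q.k == p.k);
    return 0;
}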
Example n. 19
0
double LagrangeField3D::valueNode (size_t node) const
{
  return node > 0 && node <= nno ? values(node) : 0.0;
}
Example n. 20
0
evar earrayof<evar,evar>::getvar(size_t i) const
{
  return(values(i));
}
SI_Error SMDEnvelope::Write ( FILE* l_fptr, int rigid, SMDNodeList* in_pNodeList )
{

	CSIBCVector3D*	l_pPosition = NULL;
	CSIBCVector3D*	l_pNormal = NULL;
	CSIBCVector2D*	l_pUV = NULL;
	
	XSI::Primitive l_pPrim = m_pModel.GetActivePrimitive();
	
	if ( !l_pPrim.IsValid() )
		return SI_ERR_ERROR_MSG;

	XSI::Application app;

	XSI::UIToolkit kit = app.GetUIToolkit();
	XSI::ProgressBar	m_pBar = kit.GetProgressBar();
	
	m_pBar.PutMaximum( 100 );
	m_pBar.PutMinimum( 1 );
	m_pBar.PutStep( 1 );
	m_pBar.PutValue( 1 );
	m_pBar.PutCaption( L"Writing vertex data..." );
	m_pBar.PutStatusText( L"" );
	m_pBar.PutVisible( true );

	//
	// Get default texture name
	//

	char l_szDefaultTextureName[MAX_PATH];
	XSI::OGLTexture l_pDefaultTexture = m_pModel.GetMaterial().GetOGLTexture();

	if ( !l_pDefaultTexture.IsValid() )
	{
		XSILogMessage ( "Material on enveloped mesh has no texture!", XSI::siErrorMsg );
		sprintf ( l_szDefaultTextureName, "default.tga" );
	} else {

		W2AHelper2 ( l_szDefaultTextureName, l_pDefaultTexture.GetFullName().GetWideString() );
		
		char l_szTextureFile[MAX_PATH];
		char l_szTextureExt[MAX_PATH];
		
		_splitpath ( l_szDefaultTextureName, NULL, NULL, l_szTextureFile, l_szTextureExt );
		sprintf ( l_szDefaultTextureName, "%s%s",  l_szTextureFile, l_szTextureExt );
	}

	XSI::Geometry l_pGeo = l_pPrim.GetGeometry();
	XSI::CPointRefArray l_pPoints = l_pGeo.GetPoints();
	XSI::PolygonMesh l_pPolyMesh = l_pGeo;

	XSI::CPointRefArray pointRefArray(l_pPolyMesh.GetPoints());
	XSI::MATH::CVector3Array  positionArray(pointRefArray.GetPositionArray());
	XSI::CPolygonNodeRefArray nodeRefArray(l_pPolyMesh.GetNodes());
	XSI::MATH::CVector3Array  normalArray(nodeRefArray.GetNormalArray());

	//Take care of the UV's
	XSI::CRefArray		clusterRefArray;
	l_pPolyMesh.GetClusters().Filter(XSI::siSampledPointCluster,XSI::CStringArray(),L"",clusterRefArray);

	XSI::Cluster			samplePointClusterUV;
	XSI::CRefArray       uvClusterPropertiesRefArray;
	
	int i;
	for(i=0;i < clusterRefArray.GetCount(); i++)
	{
		XSI::Cluster cluster(clusterRefArray[i]);		
		if(cluster.GetProperties().Filter(XSI::siClsUVSpaceTxtType,XSI::CStringArray(), L"",uvClusterPropertiesRefArray) == XSI::CStatus::OK)
		{
			samplePointClusterUV = cluster;			
			break;
		}
	}

	XSI::ClusterProperty uvProp(uvClusterPropertiesRefArray[0]);
	XSI::CClusterPropertyElementArray uvElementArray = uvProp.GetElements();
	XSI::CDoubleArray uvValueArray = uvElementArray.GetArray();
	long lnbUV= (long)(uvValueArray.GetCount() / 3);

	//
	// Make sure that UVs are present
	//

	if ( !samplePointClusterUV.IsValid() )
	{
		XSILogMessage ( "Invalid .SMD: Enveloped mesh doesn't have any UVs.", XSI::siErrorMsg );

		return SI_ERR_ERROR_MSG;
	}

	m_pBar.PutCaption( L"Analyzing clusters..." );
	m_pBar.PutStatusText( L"" );
	m_pBar.PutVisible( true );


	SMDNode* in_pNode = in_pNodeList->GetByName ( m_pModel.GetFullName() );
	CSIBCArray < CSIBCVector3D >	l_pVertexNormals;
	ComputeVertexNormals ( l_pVertexNormals, l_pGeo );

	XSI::CRefArray		allClusters;
	l_pPolyMesh.GetClusters().Filter(L"poly",XSI::CStringArray(),L"",allClusters);

	CSIBCArray<MateriaList>	matList;
	
	for (int c=0;c<allClusters.GetCount();c++)
	{
		XSI::Cluster Thecluster = allClusters[c];
		XSI::Material l_pMat = Thecluster.GetMaterial();
		XSI::OGLTexture l_pTexture = l_pMat.GetOGLTexture();
		matList.Extend(1);

		if ( !l_pTexture.IsValid() )
		{
			char mess[1024];
			sprintf ( mess, "Cluster #%d has no texture! Bypassing.", c);
			XSILogMessage ( mess, XSI::siErrorMsg );
			continue;
		}

		W2AHelper2 ( matList[matList.GetUsed()-1].texture, l_pTexture.GetFullName().GetWideString() );

		char l_szTextureFile[MAX_PATH];
		char l_szTextureExt[MAX_PATH];

		_splitpath ( matList[matList.GetUsed()-1].texture, NULL, NULL, l_szTextureFile, l_szTextureExt );
		sprintf ( matList[matList.GetUsed()-1].texture, "%s%s",  l_szTextureFile, l_szTextureExt );


		XSI::CClusterElementArray clusterElementArray = Thecluster.GetElements();
		XSI::CLongArray values(clusterElementArray.GetArray());
		long countPolyIndices = values.GetCount();
		matList[matList.GetUsed()-1].polyIndices.Extend(countPolyIndices);
		for (int v=0;v<countPolyIndices;v++)
		{
			matList[matList.GetUsed()-1].polyIndices[v] = values[v];
		}
	}

	XSI::CTriangleRefArray tris = l_pPolyMesh.GetTriangles();

	
	m_pBar.PutCaption( L"Processing geometry..." );
	m_pBar.PutStatusText( L"" );
	m_pBar.PutVisible( true );

	long progress_value = 0;
	long last_progress_value = 0;

		int vindex = 0;
		int vii = 0;

		for (int v=0;v<tris.GetCount();v++)
		{
			progress_value = (long)(((float)v / (float)tris.GetCount()) * 100.0f);

			if ( progress_value != last_progress_value )
			{
				last_progress_value = progress_value;
				m_pBar.PutValue ( progress_value );
				
				if ( m_pBar.IsCancelPressed() )
				{
					if ( MessageBox ( NULL, "Cancelling the export will create a corrupted SMD file.\n\n Are you sure?", "Cancel Export", MB_YESNO|MB_ICONWARNING ) == IDYES )
					{
						break;
					} else {

						m_pBar.PutVisible( true );
					}

				}

			}
			
			XSI::Triangle tri = tris[v];

			char* textureName = l_szDefaultTextureName;

			long polyI = tri.GetPolygonIndex();

			for (int p=0;p<matList.GetUsed();p++)
			{
				bool found = false;

				for (int h=0;h<matList[p].polyIndices.GetUsed();h++)
				{
					if ( matList[p].polyIndices[h] == polyI )
					{
						textureName = matList[p].texture;
						found = true;
						break;
					}

				}

				if ( found )
					break;
			}


			for (int i=0;i<3;i++)
			{
				//
				// Build a vertex
				//

				CSIBCVector3D	l_vPosition = CSIBCVector3D( (float)tri.GetPositionArray()[i].GetX(), 
																(float)tri.GetPositionArray()[i].GetY(), 
																(float)tri.GetPositionArray()[i].GetZ() );

				XSI::CTriangleVertexRefArray vRef = tri.GetPoints();
				XSI::TriangleVertex l_vTriangleVertex = vRef[i];
				

				CSIBCVector3D	l_vNormal = CSIBCVector3D ( (float)l_vTriangleVertex.GetNormal().GetX(),
															(float)l_vTriangleVertex.GetNormal().GetY(),
															(float)l_vTriangleVertex.GetNormal().GetZ() );

				//if ( SMDType == 1 )
				//{
				//	l_vNormal	= l_pVertexNormals [ l_pVertexList[vii] ];
				//} 

				if (( SMDType == 0 ) || ( SMDType == 1 ))
				{
					CSIBCMatrix4x4 l_pResult = in_pNode->GetMatrix();

					l_pResult.Multiply ( l_vPosition, l_vPosition );
					l_pResult.Multiply ( l_vNormal, l_vNormal );
					l_vNormal = l_vNormal.Normalize();
					
				}

				XSI::CTriangleVertexRefArray l_TriRef = tri.GetPoints();
				XSI::CUVArray l_vuvArray = l_TriRef.GetUVArray();
				XSI::CUV l_uv = l_vuvArray[i];
				int c = l_vuvArray.GetCount();
				long vertexIndex = tri.GetIndexArray()[i];

				CSIBCVector2D	l_vUV;
				l_vUV.m_fX = (float)l_uv.u;
				l_vUV.m_fY = (float)l_uv.v;

				//
				// Build weight list
				//

				SMDVertex*	l_pWeights = m_pVertexList[vertexIndex];

				// Not sure the line above works correctly; if not, uncomment
				// the code below.
				//for (int w=0;w<m_pVertexList.GetUsed();w++)
				//{
				//	if ( m_pVertexList[w]->GetIndex() == vertexIndex )
				//	{
				//		l_pWeights = m_pVertexList[w];
				//		break;
				//		
				//	}

				//}

				vii++;
				
				//
				// Now output
				//

			
			//	XSI::OGLTexture l_pTexture = useMat.GetOGLTexture();

				if ( vindex == 0 )
				{
					fprintf ( l_fptr, "%s\n", textureName );
				}

				if ( !rigid )
				{
					CSIBCString l_szWeight;
					l_szWeight.Concat ( l_pWeights->GetNumWeights() );
					l_szWeight.Concat (" ");
					for (int f=0;f<l_pWeights->GetNumWeights();f++)
					{
						l_szWeight.Concat ( l_pWeights->GetWeight(f)->m_iBoneID );
						l_szWeight.Concat (" ");
						l_szWeight.Concat ( l_pWeights->GetWeight(f)->m_fWeight );
						l_szWeight.Concat (" ");
					}

					fprintf ( l_fptr, 
								"0 %f %f %f %f %f %f %f %f %s\n",
								l_vPosition.m_fX,
								l_vPosition.m_fY,
								l_vPosition.m_fZ,
								l_vNormal.m_fX,
								l_vNormal.m_fY,
								l_vNormal.m_fZ,
								l_vUV.m_fX,
								l_vUV.m_fY,
								l_szWeight.GetText());
				} else {

					fprintf ( l_fptr, 
								"%d %f %f %f %f %f %f %f %f \n",
								l_pWeights->GetWeight(0)->m_iBoneID,
								l_vPosition.m_fX,
								l_vPosition.m_fY,
								l_vPosition.m_fZ,
								l_vNormal.m_fX,
								l_vNormal.m_fY,
								l_vNormal.m_fZ,
								l_vUV.m_fX,
								l_vUV.m_fY
								);
				}



				vindex++;

				if ( vindex == 3 )
					vindex = 0;
			}

		}

		
	

	return SI_SUCCESS;
}
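Each vertex written above is one SMD line: a leading parent-bone field, position, normal, UV and, for non-rigid exports, a weight list of the form "<nWeights> <boneID weight> ...". A minimal sketch with hypothetical values showing the two layouts produced by the fprintf calls:

// Sketch only: the two vertex-line layouts written by SMDEnvelope::Write above.
#include <cstdio>

int main()
{
    FILE* f = stdout;
    // non-rigid vertex: parent bone 0, then position, normal, UV, then weight list (1 weight: bone 3, weight 1.0)
    std::fprintf(f, "0 %f %f %f %f %f %f %f %f %s\n",
                 1.0, 2.0, 3.0, 0.0, 0.0, 1.0, 0.25, 0.75, "1 3 1.000000 ");
    // rigid vertex: the single bone ID replaces the leading field and no weight list follows
    std::fprintf(f, "%d %f %f %f %f %f %f %f %f \n",
                 3, 1.0, 2.0, 3.0, 0.0, 0.0, 1.0, 0.25, 0.75);
    return 0;
}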
int PostKriging::execute( GsTL_project* ) { 


	bool ok;
	std::vector< Grid_continuous_property* >::const_iterator it_prop;
	int nprop = props.size();
	std::vector< float > values( props.size() );

	for(int node_id=0; node_id < grid_->size(); ++node_id ) {
		ok=true;
		if(is_non_param_cdf_) {

			for(int k = 0; k < props.size(); ++k ) {
				ok = ok&& props[k]->is_informed( node_id );
				if(ok) values[k] = props[k]->get_value( node_id );
			}
			if(ok) non_param_cdf_->p_set(values.begin(),values.end());
		}else {
			ok =  props[0]->is_informed( node_id ) && props[1]->is_informed( node_id );
			if(ok) {
				gaussian_cdf_->mean() = props[0]->get_value( node_id );
				gaussian_cdf_->variance() = props[1]->get_value( node_id );
				//gaussian_cdf_->mean( props[0]->get_value( node_id ) );
				//gaussian_cdf_->variance( props[1]->get_value( node_id ) );
			}

		}
		if(ok) {
		//	if( is_non_param_cdf_)	cdf_ = dynamic_cast< Cdf<double>* >( non_param_cdf_ );
		//	else cdf_ = dynamic_cast< Cdf<double>* >( gaussian_cdf_ );

			if(mean_ ) {
				if( is_non_param_cdf_ ) mean_prop_->set_value( non_param_cdf_->mean(), node_id );
				else mean_prop_->set_value( gaussian_cdf_->mean(), node_id );
			}

			if(cond_var_){
				if( is_non_param_cdf_ ) cond_var_prop_->set_value( non_param_cdf_->variance(), node_id );
				else cond_var_prop_->set_value( gaussian_cdf_->variance(), node_id );
			}


			if(iqr_)
				if( is_non_param_cdf_ ) 
					iqr_prop_->set_value( non_param_cdf_->inverse(0.75) -  non_param_cdf_->inverse(0.25), node_id);
				else  iqr_prop_->set_value( gaussian_cdf_->inverse(0.75) -  gaussian_cdf_->inverse(0.25), node_id);

			if(quantile_)
			{
				std::vector<Grid_continuous_property*>::iterator it_prop = quantile_props_.begin();
				std::vector<float>::iterator it_val = quantile_vals_.begin();
				for(;it_prop != quantile_props_.end(); ++it_prop, ++it_val)
				{
					if( is_non_param_cdf_ )
						(*it_prop)->set_value( non_param_cdf_->inverse(*it_val), node_id);
					else (*it_prop)->set_value( gaussian_cdf_->inverse(*it_val), node_id);
				}
			}

			if(prob_above_)
			{
				std::vector<Grid_continuous_property*>::iterator it_prop = prob_above_props_.begin();
				std::vector<float>::iterator it_val = prob_above_vals_.begin();
				for(;it_prop != prob_above_props_.end(); ++it_prop, ++it_val) {
					if( is_non_param_cdf_ )
						(*it_prop)->set_value( 1 - non_param_cdf_->prob(*it_val), node_id);
					else (*it_prop)->set_value( 1 - gaussian_cdf_->prob(*it_val), node_id);
				}
			}

			if(prob_below_)
			{
				std::vector<Grid_continuous_property*>::iterator it_prop = prob_below_props_.begin();
				std::vector<float>::iterator it_val = prob_below_vals_.begin();
				for(;it_prop != prob_below_props_.end(); ++it_prop, ++it_val){
					if( is_non_param_cdf_ )
						(*it_prop)->set_value( non_param_cdf_->prob(*it_val), node_id);
					else (*it_prop)->set_value( gaussian_cdf_->prob(*it_val), node_id);
				}
			}
		}

	}

	return 0;
}
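For the non-parametric branch above, quantiles, IQR and exceedance probabilities are read off the cdf assembled from the kriged property values. A hedged sketch of those reads against a simple empirical cdf (sorted samples with linear interpolation); this is not GsTL's actual Non_param_cdf, only an illustration of what inverse() and prob() supply:

// Sketch only: empirical cdf with the inverse()/prob() reads used above.
#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

struct EmpiricalCdf {
    std::vector<double> v;                                  // sorted sample values
    explicit EmpiricalCdf(std::vector<double> s) : v(std::move(s)) { std::sort(v.begin(), v.end()); }

    double inverse(double p) const {                        // quantile for p in [0,1]
        const double pos = p * (v.size() - 1);
        const std::size_t lo = static_cast<std::size_t>(pos);
        const std::size_t hi = std::min(lo + 1, v.size() - 1);
        const double t = pos - lo;
        return (1.0 - t) * v[lo] + t * v[hi];
    }
    double prob(double x) const {                           // P(X <= x)
        const std::size_t n = std::upper_bound(v.begin(), v.end(), x) - v.begin();
        return static_cast<double>(n) / v.size();
    }
};
// IQR as used above: cdf.inverse(0.75) - cdf.inverse(0.25);
// probability above a threshold t: 1.0 - cdf.prob(t).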
Example n. 23
0
/*
 * Example of how to broadcast a message to Aseba nodes.
 * The message ID depends on the events defined inside your Aseba code.
 * In this simple example, we send one data value with the message (there
 * can be zero or more).
 */
void MainWindow::sendUserMessage()
{
	QVector<int> values(0);
	values.append(msgValue->text().toInt());
	dashelInterface.sendEvent(msgId->text().toInt(), values);
}
static void ParsePlistPluginInfo(nsPluginInfo& info, CFBundleRef bundle)
{
  CFDictionaryRef mimeDict = ParsePlistForMIMETypesFilename(bundle);
  
  if (!mimeDict) {
    CFTypeRef mimeTypes = ::CFBundleGetValueForInfoDictionaryKey(bundle, CFSTR("WebPluginMIMETypes"));
    if (!mimeTypes || ::CFGetTypeID(mimeTypes) != ::CFDictionaryGetTypeID() || ::CFDictionaryGetCount(static_cast<CFDictionaryRef>(mimeTypes)) == 0)
      return;
    mimeDict = static_cast<CFDictionaryRef>(::CFRetain(mimeTypes));
  }
  
  AutoCFTypeObject mimeDictAutorelease(mimeDict);
  int mimeDictKeyCount = ::CFDictionaryGetCount(mimeDict);

  // Allocate memory for mime data
  int mimeDataArraySize = mimeDictKeyCount * sizeof(char*);
  info.fMimeTypeArray = static_cast<char**>(NS_Alloc(mimeDataArraySize));
  if (!info.fMimeTypeArray)
    return;
  memset(info.fMimeTypeArray, 0, mimeDataArraySize);
  info.fExtensionArray = static_cast<char**>(NS_Alloc(mimeDataArraySize));
  if (!info.fExtensionArray)
    return;
  memset(info.fExtensionArray, 0, mimeDataArraySize);
  info.fMimeDescriptionArray = static_cast<char**>(NS_Alloc(mimeDataArraySize));
  if (!info.fMimeDescriptionArray)
    return;
  memset(info.fMimeDescriptionArray, 0, mimeDataArraySize);

  // Allocate memory for mime dictionary keys and values
  nsAutoArrayPtr<CFTypeRef> keys(new CFTypeRef[mimeDictKeyCount]);
  if (!keys)
    return;
  nsAutoArrayPtr<CFTypeRef> values(new CFTypeRef[mimeDictKeyCount]);
  if (!values)
    return;
  
  info.fVariantCount = 0;

  ::CFDictionaryGetKeysAndValues(mimeDict, keys, values);
  for (int i = 0; i < mimeDictKeyCount; i++) {
    CFTypeRef mimeString = keys[i];
    if (!mimeString || ::CFGetTypeID(mimeString) != ::CFStringGetTypeID()) {
      continue;
    }
    CFTypeRef mimeDict = values[i];
    if (mimeDict && ::CFGetTypeID(mimeDict) == ::CFDictionaryGetTypeID()) {
      if (!MimeTypeEnabled(static_cast<CFDictionaryRef>(mimeDict))) {
        continue;
      }
      info.fMimeTypeArray[info.fVariantCount] = CFStringRefToUTF8Buffer(static_cast<CFStringRef>(mimeString));
      if (!info.fMimeTypeArray[info.fVariantCount]) {
        continue;
      }
      CFTypeRef extensions = ::CFDictionaryGetValue(static_cast<CFDictionaryRef>(mimeDict), CFSTR("WebPluginExtensions"));
      if (extensions && ::CFGetTypeID(extensions) == ::CFArrayGetTypeID()) {
        int extensionCount = ::CFArrayGetCount(static_cast<CFArrayRef>(extensions));
        CFMutableStringRef extensionList = ::CFStringCreateMutable(kCFAllocatorDefault, 0);
        for (int j = 0; j < extensionCount; j++) {
          CFTypeRef extension = ::CFArrayGetValueAtIndex(static_cast<CFArrayRef>(extensions), j);
          if (extension && ::CFGetTypeID(extension) == ::CFStringGetTypeID()) {
            if (j > 0)
              ::CFStringAppend(extensionList, CFSTR(","));
            ::CFStringAppend(static_cast<CFMutableStringRef>(extensionList), static_cast<CFStringRef>(extension));
          }
        }
        info.fExtensionArray[info.fVariantCount] = CFStringRefToUTF8Buffer(static_cast<CFStringRef>(extensionList));
        ::CFRelease(extensionList);
      }
      CFTypeRef description = ::CFDictionaryGetValue(static_cast<CFDictionaryRef>(mimeDict), CFSTR("WebPluginTypeDescription"));
      if (description && ::CFGetTypeID(description) == ::CFStringGetTypeID())
        info.fMimeDescriptionArray[info.fVariantCount] = CFStringRefToUTF8Buffer(static_cast<CFStringRef>(description));
    }
    info.fVariantCount++;
  }
}
static QVariant qtValue(CFPropertyListRef cfvalue)
{
    if (!cfvalue)
        return QVariant();

    CFTypeID typeId = CFGetTypeID(cfvalue);

    /*
        Sorted grossly from most to least frequent type.
    */
    if (typeId == CFStringGetTypeID()) {
        return QSettingsPrivate::stringToVariant(QCFString::toQString(static_cast<CFStringRef>(cfvalue)));
    } else if (typeId == CFNumberGetTypeID()) {
        CFNumberRef cfnumber = static_cast<CFNumberRef>(cfvalue);
        if (CFNumberIsFloatType(cfnumber)) {
            double d;
            CFNumberGetValue(cfnumber, kCFNumberDoubleType, &d);
            return d;
        } else {
            int i;
            qint64 ll;

            if (CFNumberGetValue(cfnumber, kCFNumberIntType, &i))
                return i;
            CFNumberGetValue(cfnumber, kCFNumberLongLongType, &ll);
            return ll;
        }
    } else if (typeId == CFArrayGetTypeID()) {
        CFArrayRef cfarray = static_cast<CFArrayRef>(cfvalue);
        QList<QVariant> list;
        CFIndex size = CFArrayGetCount(cfarray);
        bool metNonString = false;
        for (CFIndex i = 0; i < size; ++i) {
            QVariant value = qtValue(CFArrayGetValueAtIndex(cfarray, i));
            if (value.type() != QVariant::String)
                metNonString = true;
            list << value;
        }
        if (metNonString)
            return list;
        else
            return QVariant(list).toStringList();
    } else if (typeId == CFBooleanGetTypeID()) {
        return (bool)CFBooleanGetValue(static_cast<CFBooleanRef>(cfvalue));
    } else if (typeId == CFDataGetTypeID()) {
        CFDataRef cfdata = static_cast<CFDataRef>(cfvalue);
        return QByteArray(reinterpret_cast<const char *>(CFDataGetBytePtr(cfdata)),
                          CFDataGetLength(cfdata));
    } else if (typeId == CFDictionaryGetTypeID()) {
        CFDictionaryRef cfdict = static_cast<CFDictionaryRef>(cfvalue);
        CFTypeID arrayTypeId = CFArrayGetTypeID();
        int size = (int)CFDictionaryGetCount(cfdict);
        QVarLengthArray<CFPropertyListRef> keys(size);
        QVarLengthArray<CFPropertyListRef> values(size);
        CFDictionaryGetKeysAndValues(cfdict, keys.data(), values.data());

        QMultiMap<QString, QVariant> map;
        for (int i = 0; i < size; ++i) {
            QString key = QCFString::toQString(static_cast<CFStringRef>(keys[i]));

            if (CFGetTypeID(values[i]) == arrayTypeId) {
                CFArrayRef cfarray = static_cast<CFArrayRef>(values[i]);
                CFIndex arraySize = CFArrayGetCount(cfarray);
                for (CFIndex j = arraySize - 1; j >= 0; --j)
                    map.insert(key, qtValue(CFArrayGetValueAtIndex(cfarray, j)));
            } else {
                map.insert(key, qtValue(values[i]));
            }
        }
        return map;
    } else if (typeId == CFDateGetTypeID()) {
        QDateTime dt;
        dt.setTime_t((uint)kCFAbsoluteTimeIntervalSince1970);
        return dt.addSecs((int)CFDateGetAbsoluteTime(static_cast<CFDateRef>(cfvalue)));
    }
    return QVariant();
}
KPrPageEffectRegistry::~KPrPageEffectRegistry()
{
    qDeleteAll(doubleEntries());
    qDeleteAll(values());
    delete d;
}
void InbandTextTrackPrivateAVF::processCueAttributes(CFAttributedStringRef attributedString, GenericCueData* cueData)
{
    // Some of the attributes we translate into per-cue WebVTT settings are repeated on each part of an attributed string, so only
    // process the first instance of each.
    enum AttributeFlags {
        Line = 1 << 0,
        Position = 1 << 1,
        Size = 1 << 2,
        Vertical = 1 << 3,
        Align = 1 << 4,
        FontName = 1 << 5
    };
    unsigned processed = 0;

    StringBuilder content;
    String attributedStringValue = CFAttributedStringGetString(attributedString);
    CFIndex length = attributedStringValue.length();
    if (!length)
        return;

    CFRange effectiveRange = CFRangeMake(0, 0);
    while ((effectiveRange.location + effectiveRange.length) < length) {

        CFDictionaryRef attributes = CFAttributedStringGetAttributes(attributedString, effectiveRange.location + effectiveRange.length, &effectiveRange);
        if (!attributes)
            continue;

        StringBuilder tagStart;
        CFStringRef valueString;
        String tagEnd;
        CFIndex attributeCount = CFDictionaryGetCount(attributes);
        Vector<const void*> keys(attributeCount);
        Vector<const void*> values(attributeCount);
        CFDictionaryGetKeysAndValues(attributes, keys.data(), values.data());

        for (CFIndex i = 0; i < attributeCount; ++i) {
            CFStringRef key = static_cast<CFStringRef>(keys[i]);
            CFTypeRef value = values[i];
            if (CFGetTypeID(key) != CFStringGetTypeID() || !CFStringGetLength(key))
                continue;

            if (CFStringCompare(key, kCMTextMarkupAttribute_Alignment, 0) == kCFCompareEqualTo) {
                valueString = static_cast<CFStringRef>(value);
                if (CFGetTypeID(valueString) != CFStringGetTypeID() || !CFStringGetLength(valueString))
                    continue;
                if (processed & Align)
                    continue;
                processed |= Align;

                if (CFStringCompare(valueString, kCMTextMarkupAlignmentType_Start, 0) == kCFCompareEqualTo)
                    cueData->setAlign(GenericCueData::Start);
                else if (CFStringCompare(valueString, kCMTextMarkupAlignmentType_Middle, 0) == kCFCompareEqualTo)
                    cueData->setAlign(GenericCueData::Middle);
                else if (CFStringCompare(valueString, kCMTextMarkupAlignmentType_End, 0) == kCFCompareEqualTo)
                    cueData->setAlign(GenericCueData::End);
                else
                    ASSERT_NOT_REACHED();

                continue;
            }

            if (CFStringCompare(key, kCMTextMarkupAttribute_BoldStyle, 0) == kCFCompareEqualTo) {
                if (static_cast<CFBooleanRef>(value) != kCFBooleanTrue)
                    continue;

                tagStart.append("<b>");
                tagEnd.insert("</b>", 0);
                continue;
            }

            if (CFStringCompare(key, kCMTextMarkupAttribute_ItalicStyle, 0) == kCFCompareEqualTo) {
                if (static_cast<CFBooleanRef>(value) != kCFBooleanTrue)
                    continue;

                tagStart.append("<i>");
                tagEnd.insert("</i>", 0);
                continue;
            }

            if (CFStringCompare(key, kCMTextMarkupAttribute_UnderlineStyle, 0) == kCFCompareEqualTo) {
                if (static_cast<CFBooleanRef>(value) != kCFBooleanTrue)
                    continue;

                tagStart.append("<u>");
                tagEnd.insert("</u>", 0);
                continue;
            }

            if (CFStringCompare(key, kCMTextMarkupAttribute_OrthogonalLinePositionPercentageRelativeToWritingDirection, 0) == kCFCompareEqualTo) {
                if (CFGetTypeID(value) != CFNumberGetTypeID())
                    continue;
                if (processed & Line)
                    continue;
                processed |= Line;

                CFNumberRef valueNumber = static_cast<CFNumberRef>(value);
                double line;
                CFNumberGetValue(valueNumber, kCFNumberFloat64Type, &line);
                cueData->setLine(line);
                continue;
            }

            if (CFStringCompare(key, kCMTextMarkupAttribute_TextPositionPercentageRelativeToWritingDirection, 0) == kCFCompareEqualTo) {
                if (CFGetTypeID(value) != CFNumberGetTypeID())
                    continue;
                if (processed & Position)
                    continue;
                processed |= Position;

                CFNumberRef valueNumber = static_cast<CFNumberRef>(value);
                double position;
                CFNumberGetValue(valueNumber, kCFNumberFloat64Type, &position);
                cueData->setPosition(position);
                continue;
            }

            if (CFStringCompare(key, kCMTextMarkupAttribute_WritingDirectionSizePercentage, 0) == kCFCompareEqualTo) {
                if (CFGetTypeID(value) != CFNumberGetTypeID())
                    continue;
                if (processed & Size)
                    continue;
                processed |= Size;

                CFNumberRef valueNumber = static_cast<CFNumberRef>(value);
                double size;
                CFNumberGetValue(valueNumber, kCFNumberFloat64Type, &size);
                cueData->setSize(size);
                continue;
            }

            if (CFStringCompare(key, kCMTextMarkupAttribute_VerticalLayout, 0) == kCFCompareEqualTo) {
                valueString = static_cast<CFStringRef>(value);
                if (CFGetTypeID(valueString) != CFStringGetTypeID() || !CFStringGetLength(valueString))
                    continue;
                
                if (CFStringCompare(valueString, kCMTextVerticalLayout_LeftToRight, 0) == kCFCompareEqualTo)
                    tagStart.append(leftToRightMark);
                else if (CFStringCompare(valueString, kCMTextVerticalLayout_RightToLeft, 0) == kCFCompareEqualTo)
                    tagStart.append(rightToLeftMark);
                continue;
            }

            if (CFStringCompare(key, kCMTextMarkupAttribute_BaseFontSizePercentageRelativeToVideoHeight, 0) == kCFCompareEqualTo) {
                if (CFGetTypeID(value) != CFNumberGetTypeID())
                    continue;
                
                CFNumberRef valueNumber = static_cast<CFNumberRef>(value);
                double baseFontSize;
                CFNumberGetValue(valueNumber, kCFNumberFloat64Type, &baseFontSize);
                cueData->setBaseFontSize(baseFontSize);
                continue;
            }

            if (CFStringCompare(key, kCMTextMarkupAttribute_RelativeFontSize, 0) == kCFCompareEqualTo) {
                if (CFGetTypeID(value) != CFNumberGetTypeID())
                    continue;
                
                CFNumberRef valueNumber = static_cast<CFNumberRef>(value);
                double relativeFontSize;
                CFNumberGetValue(valueNumber, kCFNumberFloat64Type, &relativeFontSize);
                cueData->setRelativeFontSize(relativeFontSize);
                continue;
            }

            if (CFStringCompare(key, kCMTextMarkupAttribute_FontFamilyName, 0) == kCFCompareEqualTo) {
                valueString = static_cast<CFStringRef>(value);
                if (CFGetTypeID(valueString) != CFStringGetTypeID() || !CFStringGetLength(valueString))
                    continue;
                if (processed & FontName)
                    continue;
                processed |= FontName;
                
                cueData->setFontName(valueString);
                continue;
            }

            if (CFStringCompare(key, kCMTextMarkupAttribute_ForegroundColorARGB, 0) == kCFCompareEqualTo) {
                CFArrayRef arrayValue = static_cast<CFArrayRef>(value);
                if (CFGetTypeID(arrayValue) != CFArrayGetTypeID())
                    continue;
                
                RGBA32 color;
                if (!makeRGBA32FromARGBCFArray(arrayValue, color))
                    continue;
                cueData->setForegroundColor(color);
            }
            
            if (CFStringCompare(key, kCMTextMarkupAttribute_BackgroundColorARGB, 0) == kCFCompareEqualTo) {
                CFArrayRef arrayValue = static_cast<CFArrayRef>(value);
                if (CFGetTypeID(arrayValue) != CFArrayGetTypeID())
                    continue;
                
                RGBA32 color;
                if (!makeRGBA32FromARGBCFArray(arrayValue, color))
                    continue;
                cueData->setBackgroundColor(color);
            }

            if (CFStringCompare(key, kCMTextMarkupAttribute_CharacterBackgroundColorARGB, 0) == kCFCompareEqualTo) {
                CFArrayRef arrayValue = static_cast<CFArrayRef>(value);
                if (CFGetTypeID(arrayValue) != CFArrayGetTypeID())
                    continue;
                
                RGBA32 color;
                if (!makeRGBA32FromARGBCFArray(arrayValue, color))
                    continue;
                cueData->setHighlightColor(color);
            }
        }

        content.append(tagStart);
        content.append(attributedStringValue.substring(effectiveRange.location, effectiveRange.length));
        content.append(tagEnd);
    }

    if (content.length())
        cueData->setContent(content.toString());
}
Example n. 28
0
  void interpolate_nonmatching_mesh(const GenericFunction& u0, Function& u) 
  {
    // Interpolate from GenericFunction u0 to FunctionSpace of Function u
    // The FunctionSpace of u can have a different mesh than that of u0 
    // (if u0 has a mesh)
    //
    // The algorithm is like this
    //
    //   1) Tabulate all coordinates for all dofs in u.function_space()
    //   2) Create a map from dof to component number in Mixed Space.
    //   3) Evaluate u0 for all coordinates in u (computed in 1)). 
    //        Problem here is that u0 and u will have different meshes 
    //        and as such a vertex in u will not necessarily be found 
    //        on the same processor for u0. Hence the vertex will be 
    //        passed around and searched on all ranks until found.
    //   4) Set all values in local u using the dof to component map
    
    // Get the function space interpolated to
    boost::shared_ptr<const FunctionSpace> V = u.function_space();
    
    // Get mesh and dimension of the FunctionSpace interpolated to
    const Mesh& mesh = *V->mesh();
    const std::size_t gdim = mesh.geometry().dim();
    
    // Create arrays used to evaluate one point
    std::vector<double> x(gdim);
    std::vector<double> values(u.value_size());
    Array<double> _x(gdim, x.data());
    Array<double> _values(u.value_size(), values.data());
    
    // Create vector to hold all local values of u 
    std::vector<double> local_u_vector(u.vector()->local_size());
    
    // Get coordinates of all dofs on mesh of this processor
    std::vector<double> coords = V->dofmap()->tabulate_all_coordinates(mesh);
    
    // Get dof ownership range
    std::pair<std::size_t, std::size_t> owner_range = V->dofmap()->ownership_range();
        
    // Get a map from global dofs to component number in mixed space
    std::map<std::size_t, std::size_t> dof_component_map;
    int component = -1;
    extract_dof_component_map(dof_component_map, *V, &component);
        
    // Search this process first for all coordinates in u's local mesh
    std::vector<std::size_t> global_dofs_not_found;
    std::vector<double> coords_not_found;
    for (std::size_t j=0; j<coords.size()/gdim; j++)
    {    
      std::copy(coords.begin()+j*gdim, coords.begin()+(j+1)*gdim, x.begin());
      try
      { // store when point is found
        u0.eval(_values, _x);  // This evaluates all components of u0, but we need only one. Possible fix?
        local_u_vector[j] = values[dof_component_map[j+owner_range.first]];
      } 
      catch (std::exception &e)
      { // If not found, it must be searched for on the other processes
        global_dofs_not_found.push_back(j+owner_range.first);
        for (std::size_t jj=0; jj<gdim; jj++)
          coords_not_found.push_back(x[jj]);
      }
    }
    
    // Send all points not found to the processor with one higher rank.
    // Search there, send found points back to their owner, and forward the
    // still-unfound points to the next processor in line. By the end of this
    // loop all processors will have been searched, so if a point is still not
    // found it is not in the mesh of Function u0. In that case the point
    // takes the value of zero.
    std::size_t num_processes = MPI::num_processes();
    std::size_t rank = MPI::process_number();
    for (std::size_t k = 1; k < num_processes; ++k)
    {
      std::vector<double> coords_recv;
      std::vector<std::size_t> global_dofs_recv;
           
      std::size_t src = (rank-1+num_processes) % num_processes;
      std::size_t dest =  (rank+1) % num_processes;
      
      MPI::send_recv(global_dofs_not_found, dest, global_dofs_recv, src);
      MPI::send_recv(coords_not_found, dest, coords_recv, src);
      
      global_dofs_not_found.clear();
      coords_not_found.clear();
      
      // Search this processor for received points
      std::vector<std::size_t> global_dofs_found;
      std::vector<std::vector<double> > coefficients_found;
      for (std::size_t j=0; j<coords_recv.size()/gdim; j++)
      {        
        std::size_t m = global_dofs_recv[j];
        std::copy(coords_recv.begin()+j*gdim, coords_recv.begin()+(j+1)*gdim, x.begin());

        try
        { // push back when point is found
          u0.eval(_values, _x);
          coefficients_found.push_back(values);
          global_dofs_found.push_back(m);
        } 
        catch (std::exception &e)
        { // If not found then collect and send to next rank
          global_dofs_not_found.push_back(m);
          for (std::size_t jj=0; jj<gdim; jj++)
            coords_not_found.push_back(x[jj]);
        }
      }     
      
      // Send found coefficients back to owner (dest)
      std::vector<std::size_t> global_dofs_found_recv;
      std::vector<std::vector<double> > coefficients_found_recv;
      dest = (rank-k+num_processes) % num_processes;
      src  = (rank+k) % num_processes;
      MPI::send_recv(global_dofs_found, dest, global_dofs_found_recv, src);
      MPI::send_recv(coefficients_found, dest, coefficients_found_recv, src);

      // Move all found coefficients onto the local_u_vector
      // Choose the correct component using dof_component_map  
      for (std::size_t j=0; j<global_dofs_found_recv.size(); j++)
      {
        std::size_t m = global_dofs_found_recv[j]-owner_range.first;
        std::size_t n = dof_component_map[m+owner_range.first];
        local_u_vector[m] = coefficients_found_recv[j][n];
      }
      
      // Note that this algorithm computes and sends back all values, 
      // i.e., coefficients_found pushes back the entire vector for all 
      // components in mixed space. An alternative algorithm is to send 
      // around the correct component number in addition to global dof number 
      // and coordinates and then just send back the correct value.
    }
    u.vector()->set_local(local_u_vector);
  }
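The loop over k above circulates unfound points around the processor ring: each rank forwards to (rank+1)%P, receives from (rank-1+P)%P, and at step k returns the found coefficients to the owner (rank-k+P)%P. A plain-C++ sketch (no MPI, process count assumed) of just that ring arithmetic:

// Sketch only: the ring schedule used above. After P-1 forward shifts every rank
// has seen every other rank's unfound points, and points found at step k are
// returned to the rank (rank - k + P) % P that originally owned them.
#include <cstdio>

int main()
{
    const int P = 4;                          // number of processes (assumed)
    for (int rank = 0; rank < P; ++rank) {
        for (int k = 1; k < P; ++k) {
            int src   = (rank - 1 + P) % P;   // neighbour we receive unfound points from
            int dest  = (rank + 1) % P;       // neighbour we forward unfound points to
            int owner = (rank - k + P) % P;   // rank that owns the points we hold at step k
            std::printf("rank %d, step %d: recv from %d, forward to %d, results back to %d\n",
                        rank, k, src, dest, owner);
        }
    }
    return 0;
}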
Example n. 29
0
int
Tri31::displaySelf(Renderer &theViewer, int displayMode, float fact, const char **modes, int numMode)
{

    // first set the quantity to be displayed at the nodes;
    // if displayMode is 1 through 3 we will plot material stresses otherwise 0.0

    static Vector values(numgp);

    for (int j=0; j<numgp; j++) values(j) = 0.0;

    if (displayMode < numgp && displayMode > 0) {
		for (int i=0; i<numgp; i++) {
			const Vector &stress = theMaterial[i]->getStress();
	        values(i) = stress(displayMode-1);
	    }
    }

    // now  determine the end points of the Tri31 based on
    // the display factor (a measure of the distorted image)
    // store this information in 3 3d vectors v1 through v3
    const Vector &end1Crd = theNodes[0]->getCrds();
    const Vector &end2Crd = theNodes[1]->getCrds();	
    const Vector &end3Crd = theNodes[2]->getCrds();	

    static Matrix coords(numnodes,3);

    if (displayMode >= 0) {  

		const Vector &end1Disp = theNodes[0]->getDisp();
        const Vector &end2Disp = theNodes[1]->getDisp();
        const Vector &end3Disp = theNodes[2]->getDisp();

        for (int i = 0; i < 2; i++) {
			coords(0,i) = end1Crd(i) + end1Disp(i)*fact;
	        coords(1,i) = end2Crd(i) + end2Disp(i)*fact;    
	        coords(2,i) = end3Crd(i) + end3Disp(i)*fact;    
        }
    } else {
		int mode = displayMode * -1;
        const Matrix &eigen1 = theNodes[0]->getEigenvectors();
        const Matrix &eigen2 = theNodes[1]->getEigenvectors();
        const Matrix &eigen3 = theNodes[2]->getEigenvectors();
        if (eigen1.noCols() >= mode) {
			for (int i = 0; i < 2; i++) {
				coords(0,i) = end1Crd(i) + eigen1(i,mode-1)*fact;
	            coords(1,i) = end2Crd(i) + eigen2(i,mode-1)*fact;
	            coords(2,i) = end3Crd(i) + eigen3(i,mode-1)*fact;
	        }    
       } else {
		   for (int i = 0; i < 2; i++) {
			   coords(0,i) = end1Crd(i);
	           coords(1,i) = end2Crd(i);
	           coords(2,i) = end3Crd(i);
	       }    
       }
    }
    
    int error = 0;

    // finally we draw the element using drawPolygon
    error += theViewer.drawPolygon (coords, values);

    return error;
}
Example n. 30
0
int main(int argc, char *argv[])
{
    #include "setRootCase.H"
    #include "createTime.H"
    #include "createMesh.H"

{
    scalarField samples(4);
    samples[0] = 0;
    samples[1] = 1;
    samples[2] = 2;
    samples[3] = 3;
    scalarField values(4);
    values = 1.0;
    //values[0] = 0.0;
    //values[1] = 1.0;

    linearInterpolationWeights interpolator
    //splineInterpolationWeights interpolator
    (
        samples
    );
    labelList indices;
    scalarField weights;

    interpolator.integrationWeights(1.1, 1.2, indices, weights);
    Pout<< "indices:" << indices << endl;
    Pout<< "weights:" << weights << endl;

    scalar baseSum = interpolator.weightedSum
    (
        weights,
        UIndirectList<scalar>(values, indices)
    );
    Pout<< "baseSum=" << baseSum << nl << nl << endl;


//    interpolator.integrationWeights(-0.01, 0, indices, weights);
//    scalar partialSum = interpolator.weightedSum
//    (
//        weights,
//        UIndirectList<scalar>(values, indices)
//    );
//    Pout<< "partialSum=" << partialSum << nl << nl << endl;
//
//
//    interpolator.integrationWeights(-0.01, 1, indices, weights);
//    //Pout<< "samples:" << samples << endl;
//    //Pout<< "indices:" << indices << endl;
//    //Pout<< "weights:" << weights << endl;
//    scalar sum = interpolator.weightedSum
//    (
//        weights,
//        UIndirectList<scalar>(values, indices)
//    );
//    Pout<< "integrand=" << sum << nl << nl << endl;


    return 1;
}

    IOdictionary function1Properties
    (
        IOobject
        (
            "function1Properties",
            runTime.constant(),
            mesh,
            IOobject::MUST_READ_IF_MODIFIED,
            IOobject::NO_WRITE
        )
    );

    autoPtr<Function1<scalar>> function1
    (
        Function1<scalar>::New
        (
            "function1",
            function1Properties
        )
    );

    scalar x0 = readScalar(function1Properties.lookup("x0"));
    scalar x1 = readScalar(function1Properties.lookup("x1"));

    Info<< "Data entry type: " << function1().type() << nl << endl;

    Info<< "Inputs" << nl
        << "    x0 = " << x0 << nl
        << "    x1 = " << x1 << nl
        << endl;

    Info<< "Interpolation" << nl
        << "    f(x0) = " << function1().value(x0) << nl
        << "    f(x1) = " << function1().value(x1) << nl
        << endl;

    Info<< "Integration" << nl
        << "    int(f(x)) lim(x0->x1) = " << function1().integrate(x0, x1) << nl
        << endl;

    return 0;
}
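In the linearInterpolationWeights block above, all sample values are 1.0, so integrating over [1.1, 1.2] should give an area equal to the interval length, i.e. baseSum = 0.1. A hedged standalone sketch (not OpenFOAM's linearInterpolationWeights) of integrating a piecewise-linear field over a sub-interval by clipped trapezoids:

// Sketch only: trapezoidal integration of a piecewise-linear field over [a,b].
// With samples {0,1,2,3} and all values 1.0, the integral over [1.1, 1.2] is 0.1,
// matching the baseSum expected in the test above.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

double integratePiecewiseLinear(const std::vector<double>& x, const std::vector<double>& y,
                                double a, double b)
{
    double sum = 0.0;
    for (std::size_t i = 0; i + 1 < x.size(); ++i) {
        const double lo = std::max(a, x[i]);
        const double hi = std::min(b, x[i+1]);
        if (hi <= lo) continue;                              // segment lies outside [a,b]
        auto lerp = [&](double t) {                          // linear value at t within segment i
            const double w = (t - x[i]) / (x[i+1] - x[i]);
            return (1.0 - w) * y[i] + w * y[i+1];
        };
        sum += 0.5 * (lerp(lo) + lerp(hi)) * (hi - lo);      // trapezoid over the clipped piece
    }
    return sum;
}

int main()
{
    std::vector<double> samples = {0, 1, 2, 3};
    std::vector<double> values(4, 1.0);
    std::printf("integral over [1.1, 1.2] = %g\n",
                integratePiecewiseLinear(samples, values, 1.1, 1.2)); // prints 0.1
    return 0;
}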