Example #1
0
bool SIMoutput::writeGlvS2 (const Vector& psol, int iStep, int& nBlock,
                            double time, int idBlock, int psolComps)
{
  if (psol.empty())
    return true; // No primary solution
  else if (!myVtf || !myProblem)
    return false;
  else if (myProblem->getNoSolutions() < 1)
    return true; // No patch-level primary solution

  size_t nf = myProblem->getNoFields(2);
  if (nf < 1) return true; // No secondary solution

  bool haveAsol = false;
  if (mySol)
  {
    if (this->getNoFields() == 1)
      haveAsol = mySol->hasScalarSol() > 1;
    else
      haveAsol = mySol->hasVectorSol() > 1;
  }

  bool doProject = (opt.discretization == ASM::Spline ||
                    opt.discretization == ASM::SplineC1) &&
    opt.project.find(SIMoptions::GLOBAL) != opt.project.end();

  size_t sMAX = nf;
  if (haveAsol) sMAX += nf;
  if (doProject) sMAX += nf;
  std::vector<IntVec> sID(sMAX);
  std::array<IntVec,2> vID;
  Matrix field, pdir;
  Vector lovec;

  size_t i, j, k;
  int geomID = myGeomID;
  for (i = 0; i < myModel.size(); i++)
  {
    if (myModel[i]->empty()) continue; // skip empty patches

    if (msgLevel > 1)
      IFEM::cout <<"Writing secondary solution for patch "<< i+1 << std::endl;

    // Direct evaluation of secondary solution variables

    LocalSystem::patch = i;
    myProblem->initResultPoints(time,true);
    myModel[i]->extractNodeVec(psol,myProblem->getSolution(),psolComps,0);
    this->extractPatchDependencies(myProblem,myModel,i);
    this->setPatchMaterial(i+1);
    if (!myModel[i]->evalSolution(field,*myProblem,opt.nViz))
      return false;

    myModel[i]->filterResults(field,myVtf->getBlock(++geomID));

    for (j = 1, k = 0; j <= field.rows() && k < sMAX; j++)
      if (!myVtf->writeNres(field.getRow(j),++nBlock,geomID))
        return false;
      else
        sID[k++].push_back(nBlock);

    // Write principal directions, if any, as vector fields

    size_t nPoints = field.cols();
    for (j = 0; j < 2 && myProblem->getPrincipalDir(pdir,nPoints,j+1); j++)
    {
      myModel[i]->filterResults(pdir,myVtf->getBlock(geomID));
      if (!myVtf->writeVres(pdir,++nBlock,geomID,this->getNoSpaceDim()))
        return false;
      else
        vID[j].push_back(nBlock);
    }

    if (doProject)
    {
      // Projection of secondary solution variables (tensorial splines only)

      myProblem->initResultPoints(time);
      if (!myModel[i]->evalSolution(field,*myProblem,opt.nViz,'D'))
        return false;

      myModel[i]->filterResults(field,myVtf->getBlock(geomID));

      for (j = 1; j <= field.rows() && k < sMAX; j++)
        if (!myVtf->writeNres(field.getRow(j),++nBlock,geomID))
          return false;
        else
          sID[k++].push_back(nBlock);
    }

    if (haveAsol)
    {
      // Evaluate analytical solution variables

      if (msgLevel > 1)
        IFEM::cout <<"Writing exact solution for patch "<< i+1 << std::endl;

      const ElementBlock* grid = myVtf->getBlock(geomID);
      Vec3Vec::const_iterator cit = grid->begin_XYZ();
      field.fill(0.0);
      for (j = 1; cit != grid->end_XYZ() && haveAsol; j++, cit++)
      {
        Vec4 Xt(*cit,time);
        if (mySol->hasScalarSol() == 3 || mySol->hasVectorSol() == 3)
          haveAsol = myProblem->evalSol(lovec,*mySol->getStressSol(),Xt);
        else if (this->getNoFields() == 1)
          haveAsol = myProblem->evalSol(lovec,*mySol->getScalarSecSol(),Xt);
        else
          haveAsol = myProblem->evalSol(lovec,*mySol->getVectorSecSol(),Xt);
        if (haveAsol)
          field.fillColumn(j,lovec);
      }

      for (j = 1; j <= field.rows() && k < sMAX && haveAsol; j++)
        if (!myVtf->writeNres(field.getRow(j),++nBlock,geomID))
          return false;
        else
          sID[k++].push_back(nBlock);
    }
  }

  // Write result block identifications

  std::string vname("Principal direction P1");
  for (i = 0; i < 2; i++, vname[vname.size()-1]++)
    if (!vID[i].empty())
      if (!myVtf->writeVblk(vID[i],vname.c_str(),idBlock+i,iStep))
        return false;

  const char* prefix = haveAsol ? "FE" : nullptr;
  for (i = j = 0; i < nf && j < sMAX && !sID[j].empty(); i++, j++)
    if (!myVtf->writeSblk(sID[j],myProblem->getField2Name(i,prefix).c_str(),
                          idBlock++,iStep)) return false;

  if (doProject)
    for (i = 0; i < nf && j < sMAX && !sID[j].empty(); i++, j++)
      if (!myVtf->writeSblk(sID[j],myProblem->getField2Name(i,"Projected").c_str(),
                            idBlock++,iStep)) return false;

  if (haveAsol)
    for (i = 0; i < nf && j < sMAX && !sID[j].empty(); i++, j++)
      if (!myVtf->writeSblk(sID[j],myProblem->getField2Name(i,"Exact").c_str(),
                            idBlock++,iStep)) return false;

  return true;
}
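The block-numbering bookkeeping above (one ++nBlock per written result block, collected per field in sID so a single writeSblk-style call can reference all patches at once) is easy to lose track of. Below is a self-contained sketch of just that pattern using only standard types; the field/patch counts and the printf stand-ins for writeNres/writeSblk are hypothetical, not IFEM API.

#include <cstdio>
#include <vector>

int main ()
{
  int nBlock = 0;                          // running result-block counter
  const size_t nf = 3, nPatch = 2;         // assumed field and patch counts
  std::vector<std::vector<int>> sID(nf);   // block ids collected per field
  for (size_t patch = 0; patch < nPatch; patch++)
    for (size_t f = 0; f < nf; f++)
      sID[f].push_back(++nBlock);          // stand-in for writeNres
  for (size_t f = 0; f < nf; f++)          // stand-in for writeSblk
  {
    std::printf("field %zu:", f);
    for (int id : sID[f]) std::printf(" %d", id);
    std::printf("\n");
  }
  return 0;
}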
template <typename VREG>
bool
LiveRangeAllocator<VREG>::buildLivenessInfo()
{
    if (!init())
        return false;

    Vector<MBasicBlock *, 1, SystemAllocPolicy> loopWorkList;
    BitSet *loopDone = BitSet::New(graph.numBlockIds());
    if (!loopDone)
        return false;

    for (size_t i = graph.numBlocks(); i > 0; i--) {
        if (mir->shouldCancel("Build Liveness Info (main loop)"))
            return false;

        LBlock *block = graph.getBlock(i - 1);
        MBasicBlock *mblock = block->mir();

        BitSet *live = BitSet::New(graph.numVirtualRegisters());
        if (!live)
            return false;
        liveIn[mblock->id()] = live;

        // Propagate liveIn from our successors to us
        for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
            MBasicBlock *successor = mblock->lastIns()->getSuccessor(i);
            // Skip backedges, as we fix them up at the loop header.
            if (mblock->id() < successor->id())
                live->insertAll(liveIn[successor->id()]);
        }

        // Add successor phis
        if (mblock->successorWithPhis()) {
            LBlock *phiSuccessor = mblock->successorWithPhis()->lir();
            for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
                LPhi *phi = phiSuccessor->getPhi(j);
                LAllocation *use = phi->getOperand(mblock->positionInPhiSuccessor());
                uint32_t reg = use->toUse()->virtualRegister();
                live->insert(reg);
            }
        }

        // Variables are assumed to be alive for the entire block; a definition
        // shortens the interval to the point of definition.
        for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
            if (!vregs[*liveRegId].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
                                                                  outputOf(block->lastId()).next()))
            {
                return false;
            }
        }

        // Shorten the front end of live intervals for live variables to their
        // point of definition, if found.
        for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
            // Calls may clobber registers, so force a spill and reload around the callsite.
            if (ins->isCall()) {
                for (AnyRegisterIterator iter(allRegisters_); iter.more(); iter++) {
                    if (forLSRA) {
                        if (!addFixedRangeAtHead(*iter, inputOf(*ins), outputOf(*ins)))
                            return false;
                    } else {
                        bool found = false;
                        for (size_t i = 0; i < ins->numDefs(); i++) {
                            if (ins->getDef(i)->isPreset() &&
                                *ins->getDef(i)->output() == LAllocation(*iter)) {
                                found = true;
                                break;
                            }
                        }
                        if (!found && !addFixedRangeAtHead(*iter, outputOf(*ins), outputOf(*ins).next()))
                            return false;
                    }
                }
            }

            for (size_t i = 0; i < ins->numDefs(); i++) {
                if (ins->getDef(i)->policy() != LDefinition::PASSTHROUGH) {
                    LDefinition *def = ins->getDef(i);

                    CodePosition from;
                    if (def->policy() == LDefinition::PRESET && def->output()->isRegister() && forLSRA) {
                        // The fixed range covers the current instruction so the
                        // interval for the virtual register starts at the next
                        // instruction. If the next instruction has a fixed use,
                        // this can lead to unnecessary register moves. To avoid
                        // special handling for this, assert the next instruction
                        // has no fixed uses. defineFixed guarantees this by inserting
                        // an LNop.
                        JS_ASSERT(!NextInstructionHasFixedUses(block, *ins));
                        AnyRegister reg = def->output()->toRegister();
                        if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins).next()))
                            return false;
                        from = outputOf(*ins).next();
                    } else {
                        from = forLSRA ? inputOf(*ins) : outputOf(*ins);
                    }

                    if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
                        // MUST_REUSE_INPUT is implemented by allocating an output
                        // register and moving the input to it. Register hints are
                        // used to avoid unnecessary moves. We give the input an
                        // LUse::ANY policy to avoid allocating a register for the
                        // input.
                        LUse *inputUse = ins->getOperand(def->getReusedInput())->toUse();
                        JS_ASSERT(inputUse->policy() == LUse::REGISTER);
                        JS_ASSERT(inputUse->usedAtStart());
                        *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true);
                    }

                    LiveInterval *interval = vregs[def].getInterval(0);
                    interval->setFrom(from);

                    // Ensure that if there aren't any uses, there's at least
                    // some interval for the output to go into.
                    if (interval->numRanges() == 0) {
                        if (!interval->addRangeAtHead(from, from.next()))
                            return false;
                    }
                    live->remove(def->virtualRegister());
                }
            }

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition *temp = ins->getTemp(i);
                if (temp->isBogusTemp())
                    continue;

                if (forLSRA) {
                    if (temp->policy() == LDefinition::PRESET) {
                        if (ins->isCall())
                            continue;
                        AnyRegister reg = temp->output()->toRegister();
                        if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
                            return false;

                        // Fixed intervals are not added to safepoints, so do it
                        // here.
                        if (LSafepoint *safepoint = ins->safepoint())
                            AddRegisterToSafepoint(safepoint, reg, *temp);
                    } else {
                        JS_ASSERT(!ins->isCall());
                        if (!vregs[temp].getInterval(0)->addRangeAtHead(inputOf(*ins), outputOf(*ins)))
                            return false;
                    }
                } else {
                    // Normally temps are considered to cover both the input
                    // and output of the associated instruction. In some cases
                    // though we want to use a fixed register as both an input
                    // and clobbered register in the instruction, so watch for
                    // this and shorten the temp to cover only the output.
                    CodePosition from = inputOf(*ins);
                    if (temp->policy() == LDefinition::PRESET) {
                        AnyRegister reg = temp->output()->toRegister();
                        for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) {
                            if (alloc->isUse()) {
                                LUse *use = alloc->toUse();
                                if (use->isFixedRegister()) {
                                    if (GetFixedRegister(vregs[use].def(), use) == reg)
                                        from = outputOf(*ins);
                                }
                            }
                        }
                    }

                    CodePosition to =
                        ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();
                    if (!vregs[temp].getInterval(0)->addRangeAtHead(from, to))
                        return false;
                }
            }

            DebugOnly<bool> hasUseRegister = false;
            DebugOnly<bool> hasUseRegisterAtStart = false;

            for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) {
                if (alloc->isUse()) {
                    LUse *use = alloc->toUse();

                    // The first instruction, LLabel, has no uses.
                    JS_ASSERT(inputOf(*ins) > outputOf(block->firstId()));

                    // Call uses should always be at-start or fixed, since the fixed intervals
                    // use all registers.
                    JS_ASSERT_IF(ins->isCall() && !alloc.isSnapshotInput(),
                                 use->isFixedRegister() || use->usedAtStart());

#ifdef DEBUG
                    // Don't allow at-start call uses if there are temps of the same kind,
                    // so that we don't assign the same register.
                    if (ins->isCall() && use->usedAtStart()) {
                        for (size_t i = 0; i < ins->numTemps(); i++)
                            JS_ASSERT(vregs[ins->getTemp(i)].isDouble() != vregs[use].isDouble());
                    }

                    // If there are both useRegisterAtStart(x) and useRegister(y)
                    // uses, we may assign the same register to both operands due to
                    // interval splitting (bug 772830). Don't allow this for now.
                    if (use->policy() == LUse::REGISTER) {
                        if (use->usedAtStart()) {
                            if (!IsInputReused(*ins, use))
                                hasUseRegisterAtStart = true;
                        } else {
                            hasUseRegister = true;
                        }
                    }

                    JS_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
#endif

                    // Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
                    if (use->policy() == LUse::RECOVERED_INPUT)
                        continue;

                    CodePosition to;
                    if (forLSRA) {
                        if (use->isFixedRegister()) {
                            AnyRegister reg = GetFixedRegister(vregs[use].def(), use);
                            if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
                                return false;
                            to = inputOf(*ins);

                            // Fixed intervals are not added to safepoints, so do it
                            // here.
                            LSafepoint *safepoint = ins->safepoint();
                            if (!ins->isCall() && safepoint)
                                AddRegisterToSafepoint(safepoint, reg, *vregs[use].def());
                        } else {
                            to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
                        }
                    } else {
                        to = (use->usedAtStart() || ins->isCall())
                           ? inputOf(*ins) : outputOf(*ins);
                        if (use->isFixedRegister()) {
                            LAllocation reg(AnyRegister::FromCode(use->registerCode()));
                            for (size_t i = 0; i < ins->numDefs(); i++) {
                                LDefinition *def = ins->getDef(i);
                                if (def->policy() == LDefinition::PRESET && *def->output() == reg)
                                    to = inputOf(*ins);
                            }
                        }
                    }

                    LiveInterval *interval = vregs[use].getInterval(0);
                    if (!interval->addRangeAtHead(inputOf(block->firstId()), forLSRA ? to : to.next()))
                        return false;
                    interval->addUse(new UsePosition(use, to));

                    live->insert(use->virtualRegister());
                }
            }
        }

        // Phis have simultaneous assignment semantics at block begin, so at
        // the beginning of the block we can be sure that liveIn does not
        // contain any phi outputs.
        for (unsigned int i = 0; i < block->numPhis(); i++) {
            LDefinition *def = block->getPhi(i)->getDef(0);
            if (live->contains(def->virtualRegister())) {
                live->remove(def->virtualRegister());
            } else {
                // This is a dead phi, so add a dummy range over all phis. This
                // can go away if we have an earlier dead code elimination pass.
                if (!vregs[def].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
                                                               outputOf(block->firstId())))
                {
                    return false;
                }
            }
        }

        if (mblock->isLoopHeader()) {
            // A divergence from the published algorithm is required here, as
            // our block order does not guarantee that blocks of a loop are
            // contiguous. As a result, a single live interval spanning the
            // loop is not possible. Additionally, we require liveIn in a later
            // pass for resolution, so that must also be fixed up here.
            MBasicBlock *loopBlock = mblock->backedge();
            while (true) {
                // Blocks must already have been visited to have a liveIn set.
                JS_ASSERT(loopBlock->id() >= mblock->id());

                // Add an interval for this entire loop block
                CodePosition from = inputOf(loopBlock->lir()->firstId());
                CodePosition to = outputOf(loopBlock->lir()->lastId()).next();

                for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
                    if (!vregs[*liveRegId].getInterval(0)->addRange(from, to))
                        return false;
                }

                // Fix up the liveIn set to account for the new interval
                liveIn[loopBlock->id()]->insertAll(live);

                // Make sure we don't visit this node again
                loopDone->insert(loopBlock->id());

                // If this is the loop header, any predecessors are either the
                // backedge or out of the loop, so skip any predecessors of
                // this block
                if (loopBlock != mblock) {
                    for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
                        MBasicBlock *pred = loopBlock->getPredecessor(i);
                        if (loopDone->contains(pred->id()))
                            continue;
                        if (!loopWorkList.append(pred))
                            return false;
                    }
                }

                // Terminate loop if out of work.
                if (loopWorkList.empty())
                    break;

                // Grab the next block off the work list, skipping any OSR block.
                while (!loopWorkList.empty()) {
                    loopBlock = loopWorkList.popCopy();
                    if (loopBlock->lir() != graph.osrBlock())
                        break;
                }

                // If we reached the end without finding a non-OSR block, there are no more work items.
                if (loopBlock->lir() == graph.osrBlock()) {
                    JS_ASSERT(loopWorkList.empty());
                    break;
                }
            }

            // Clear the done set for other loops
            loopDone->clear();
        }

        JS_ASSERT_IF(!mblock->numPredecessors(), live->empty());
    }

    validateVirtualRegisters();

    // If the script has an infinite loop, there may be no MReturn and therefore
    // no fixed intervals. Add a small range to fixedIntervalsUnion so that the
    // rest of the allocator can assume it has at least one range.
    if (fixedIntervalsUnion->numRanges() == 0) {
        if (!fixedIntervalsUnion->addRangeAtHead(CodePosition(0, CodePosition::INPUT),
                                                 CodePosition(0, CodePosition::OUTPUT)))
        {
            return false;
        }
    }

    return true;
}
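buildLivenessInfo above interleaves liveness computation with live-interval construction and relies on a specific block order plus the loop-header fixup. For reference, here is a minimal, generic sketch of only the underlying backward-liveness dataflow, liveIn = gen U (liveOut \ kill) with liveOut the union of the successors' liveIn, over a tiny hypothetical CFG with std::bitset instead of Ion's BitSet; it iterates to a fixed point and is an illustration, not the allocator's algorithm.

#include <bitset>
#include <cstdio>
#include <vector>

int main()
{
    struct Block { std::vector<int> succs; std::bitset<4> gen, kill; };
    std::vector<Block> cfg(3);
    cfg[0].succs = {1}; cfg[0].kill.set(0);                     // block 0 defines v0
    cfg[1].succs = {2}; cfg[1].gen.set(0); cfg[1].kill.set(1);  // block 1 uses v0, defines v1
    cfg[2].gen.set(1);                                          // block 2 uses v1

    std::vector<std::bitset<4>> liveIn(cfg.size());
    bool changed = true;
    while (changed) {                                  // iterate to a fixed point
        changed = false;
        for (int b = int(cfg.size()) - 1; b >= 0; b--) {
            std::bitset<4> liveOut;
            for (int s : cfg[size_t(b)].succs)
                liveOut |= liveIn[size_t(s)];
            std::bitset<4> in = cfg[size_t(b)].gen | (liveOut & ~cfg[size_t(b)].kill);
            if (in != liveIn[size_t(b)]) { liveIn[size_t(b)] = in; changed = true; }
        }
    }
    for (size_t b = 0; b < cfg.size(); b++)
        std::printf("liveIn(%zu) = %s\n", b, liveIn[b].to_string().c_str());
    return 0;
}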
bool
ion::EliminatePhis(MIRGenerator *mir, MIRGraph &graph)
{
    Vector<MPhi *, 16, SystemAllocPolicy> worklist;

    // Add all observable phis to a worklist. We use the "in worklist" bit to
    // mean "this phi is live".
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Phis (populate loop)"))
            return false;

        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            // Flag all phis as unused; only the observable ones will be marked
            // as used when processed via the worklist.
            iter->setUnused();

            // If the phi is redundant, remove it here.
            if (MDefinition *redundant = IsPhiRedundant(*iter)) {
                iter->replaceAllUsesWith(redundant);
                iter = block->discardPhiAt(iter);
                continue;
            }

            // Enqueue observable Phis.
            if (IsPhiObservable(*iter)) {
                iter->setInWorklist();
                if (!worklist.append(*iter))
                    return false;
            }
            iter++;
        }
    }

    // Iteratively mark all phis reachable from live phis.
    while (!worklist.empty()) {
        if (mir->shouldCancel("Eliminate Phis (worklist)"))
            return false;

        MPhi *phi = worklist.popCopy();
        JS_ASSERT(phi->isUnused());
        phi->setNotInWorklist();

        // The removal of Phis can produce newly redundant phis.
        if (MDefinition *redundant = IsPhiRedundant(phi)) {
            // Add to the worklist the used phis which are impacted.
            for (MUseDefIterator it(phi); it; it++) {
                if (it.def()->isPhi()) {
                    MPhi *use = it.def()->toPhi();
                    if (!use->isUnused()) {
                        use->setUnusedUnchecked();
                        use->setInWorklist();
                        if (!worklist.append(use))
                            return false;
                    }
                }
            }
            phi->replaceAllUsesWith(redundant);
        } else {
            // Otherwise flag the phi as used.
            phi->setNotUnused();
        }

        // The current phi is/was used, so all its operands are used.
        for (size_t i = 0; i < phi->numOperands(); i++) {
            MDefinition *in = phi->getOperand(i);
            if (!in->isPhi() || !in->isUnused() || in->isInWorklist())
                continue;
            in->setInWorklist();
            if (!worklist.append(in->toPhi()))
                return false;
        }
    }

    // Sweep dead phis.
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            if (iter->isUnused())
                iter = block->discardPhiAt(iter);
            else
                iter++;
        }
    }

    return true;
}
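EliminatePhis above is a mark-and-sweep over the phi def-use graph: seed a worklist with the observable phis, transitively mark everything they depend on, then discard whatever was never marked. A stripped-down sketch of that worklist pattern with a hypothetical node type (not MIR) is shown below for comparison.

#include <cstdio>
#include <vector>

struct Node { std::vector<int> operands; bool used = false; };

int main()
{
    std::vector<Node> nodes(4);
    nodes[2].operands = {0};              // node 2 depends on node 0
    nodes[2].used = true;                 // node 2 is "observable"
    std::vector<int> worklist = {2};
    while (!worklist.empty()) {           // mark everything the live nodes reach
        int n = worklist.back();
        worklist.pop_back();
        for (int op : nodes[size_t(n)].operands)
            if (!nodes[size_t(op)].used) {
                nodes[size_t(op)].used = true;
                worklist.push_back(op);
            }
    }
    for (size_t i = 0; i < nodes.size(); i++)   // sweep the unmarked ones
        std::printf("node %zu: %s\n", i, nodes[i].used ? "kept" : "discarded");
    return 0;
}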
Example #4
0
bool ElasticBeam::evalInt (LocalIntegral& elmInt,
                           const FiniteElement& fe,
                           const Vec3& X) const
{
  // Calculate initial element length
  Vec3 X0 = fe.XC[1] - fe.XC[0];
  double L0 = X0.length();
  if (L0 <= 1.0e-8)
  {
    std::cerr <<" *** ElasticBeam::evalInt: Zero initial element length "
              << L0 << std::endl;
    return false;
  }
#if INT_DEBUG > 1
  std::cout <<"ElasticBeam: X = "<< X <<", L0 = "<< L0;
#endif

  const Vector& eV = elmInt.vec.front();
  if (!eV.empty())
  {
    // Calculate current element length
    Vec3 U1(eV.ptr(),npv);
    Vec3 U2(eV.ptr()+eV.size()-npv,npv);
    Vec3 X1 = X0 + U2 - U1;
    double L = X1.length();
    if (L <= 1.0e-8)
    {
      std::cerr <<" *** ElasticBeam::evalInt: Zero element length "
                << L << std::endl;
      return false;
    }
#if INT_DEBUG > 1
    std::cout <<" L = "<< L <<", U1 = "<< U1 <<" U2 = "<< U2;
#endif
  }

  // Evaluate beam stiffness properties at this point
  double EA  = EAfunc  ? (*EAfunc)(X)  : E*A;
  double EIy = EIyfunc ? (*EIyfunc)(X) : E*Iy;
  double EIz = EIzfunc ? (*EIzfunc)(X) : E*Iz;
  double GIt = GItfunc ? (*GItfunc)(X) : G*It;
#if INT_DEBUG > 1
  std::cout <<"\n             EA = "<< EA
            <<" EI = "<< EIy <<" "<< EIz <<" GIt = "<< GIt;
#endif

  // Evaluate the beam mass properties (if needed) at this point
  bool hasGrF = gravity.isZero() ? false : eS > 0;
  double rhoA = rhofunc && (eM > 0 || hasGrF) ? (*rhofunc)(X) : rho*A;
  double I_xx = Ixfunc  &&  eM > 0            ? (*Ixfunc)(X)  : rho*Ix;
  double I_yy = Iyfunc  &&  eM > 0            ? (*Iyfunc)(X)  : rho*Iy;
  double I_zz = Izfunc  &&  eM > 0            ? (*Izfunc)(X)  : rho*Iz;
  double CG_y = CGyfunc && (eM > 0 || hasGrF) ? (*CGyfunc)(X) : 0.0;
  double CG_z = CGzfunc && (eM > 0 || hasGrF) ? (*CGzfunc)(X) : 0.0;
#if INT_DEBUG > 1
  std::cout <<", rho*A = "<< rhoA <<" rho*I = "<< I_xx <<" "<< I_yy <<" "<< I_zz
            <<", CoG = "<< CG_y <<" "<< CG_z << std::endl;
#endif

  ElmMats& elMat = static_cast<ElmMats&>(elmInt);

  if (hasGrF)
  {
    // External (gravitation) forces
    Vector& S = elMat.b[eS-1];
    Vec3 gvec = (0.5*rhoA*L0)*gravity; // Nodal gravity force at each end
    for (unsigned short int i = 1; i <= 3; i++)
      S(i) = S(npv+i) = gvec[i-1];

    if (CG_y != 0.0 || CG_z != 0.0)
    {
      // The centre of gravity has an offset w.r.t. the neutral axis and will
      // therefore result in an additional torque load on the element
      Tensor Tlg(this->getLocalAxes(elmInt)); // Local-to-global transformation
      Vec3   gloc = gvec * Tlg; // Nodal gravity force in local element axes
      double Tgrav = gloc.z*CG_y - gloc.y*CG_z; // Torque from eccentric gravity
      Vec3   gmom = Tgrav*Tlg[0]; // Global moment from the eccentric gravity
      for (unsigned short int i = 4; i <= 6; i++)
        S(i) = S(npv+i) = gmom[i-4];
    }
#if INT_DEBUG > 1
    std::cout <<"ElasticBeam: S_ext"<< S << std::endl;
#endif
  }

  if (eKm) // Evaluate the material stiffness matrix
    this->getMaterialStiffness(elMat.A[eKm-1],EA,GIt,EIy,EIz,L0);

  Vector v;
  double N = 0.0;

  if ((iS || eKg) && eV.normInf() > 1.0e-16*L0)
  {
    v = eV; // Transform the element displacement vector to local coordinates
    const Matrix& Tlg = this->getLocalAxes(elmInt);
    for (size_t k = 1; k < v.size(); k += 3)
      if (!utl::transform(v,Tlg,k,true))
        return false;

#if INT_DEBUG > 1
    std::cout <<"ElasticBeam: v"<< v;
#endif
    if (eKg) N = EA*(v(7)-v(1))/L0; // Axial force
  }

  if (iS && !v.empty())
  {
    v *= -1.0;

    // Internal forces, S_int = Km*v
    Matrix tmpKm;
    if (!eKm) this->getMaterialStiffness(tmpKm,EA,GIt,EIy,EIz,L0);
    Matrix& Km = eKm ? elMat.A[eKm-1] : tmpKm;
    if (!Km.multiply(v,elMat.b[iS-1],false,iS == eS))
      return false;

#if INT_DEBUG > 1
    if (iS == eS)
      std::cout <<"ElasticBeam: S_ext - S_int"<< elMat.b[iS-1] << std::endl;
    else
      std::cout <<"ElasticBeam: -S_int"<< elMat.b[iS-1] << std::endl;
#endif
  }

  if (eKg && N != 0.0)
  {
#if INT_DEBUG > 1
    std::cout <<"ElasticBeam: Axial force, N = "<< N << std::endl;
#endif

    // Evaluate the geometric stiffness matrix
    if (eKg == eKm)
    {
      Matrix Kg(12,12);
      this->getGeometricStiffness(Kg,EIy,EIz,L0,N);
      elMat.A[eKm-1].add(Kg);
    }
    else
      this->getGeometricStiffness(elMat.A[eKg-1],EIy,EIz,L0,N);
  }

  if (eM)
  {
    // Evaluate the mass matrix
    this->getMassMatrix(elMat.A[eM-1],rhoA,I_xx,I_yy,I_zz,L0);
    if (CG_y != 0.0 || CG_z != 0.0) // Transform to neutral axis location
      eccTransform(elMat.A[eM-1],Vec3(0.0,-CG_y,-CG_z),Vec3(0.0,-CG_y,-CG_z));
  }

  return true;
}
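As a small numeric companion to the element routine above, the sketch below evaluates only the axial part with assumed properties: the 2-node bar stiffness EA/L0 * [[1,-1],[-1,1]] and the axial force N = EA*(u2 - u1)/L0 that feeds the geometric stiffness (cf. N = EA*(v(7)-v(1))/L0 above). The numbers are made up and the matrices are not ElasticBeam's.

#include <cstdio>

int main ()
{
  const double E = 2.1e11, A = 1.0e-3, L0 = 2.0;   // assumed section properties
  const double EA = E*A;
  const double k  = EA/L0;
  const double K[2][2] = { { k, -k }, { -k, k } }; // axial 2-node stiffness block
  const double u1 = 0.0, u2 = 1.0e-4;              // assumed end displacements
  const double N  = EA*(u2 - u1)/L0;               // resulting axial force
  std::printf("k = %.3e, N = %.3e\n", K[0][0], N);
  return 0;
}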
void Forest::buildConvex( const Box3F &box, Convex *convex )
{
   mConvexList->collectGarbage();

   // Get all ForestItem(s) within the box.
   Vector<ForestItem> trees;
   mData->getItems( box, &trees );
   if ( trees.empty() )
      return;

   for ( U32 i = 0; i < trees.size(); i++ )
   {
      const ForestItem &forestItem = trees[i];

      Box3F realBox = box;
      mWorldToObj.mul( realBox );
      realBox.minExtents.convolveInverse( mObjScale );
      realBox.maxExtents.convolveInverse( mObjScale );

      // JCF: is this really necessary if we already got this ForestItem
      // as a result from getItems?
      if ( realBox.isOverlapped( getObjBox() ) == false )
         continue;      

      TSForestItemData *data = (TSForestItemData*)forestItem.getData();

      // Find CollisionDetail(s) that are defined...
      const Vector<S32> &details = data->getCollisionDetails();
      for ( U32 j = 0; j < details.size(); j++ ) 
      {
         // JCFHACK: need to fix this if we want this to work with speedtree
         // or other cases in which we don't have a TSForestItemData.
         // Most likely via preventing this method and other torque collision
         // specific stuff from ever getting called.
         if ( details[j] == -1 ) 
            continue;         

         // See if this convex exists in the working set already...
         Convex* cc = 0;
         CollisionWorkingList& wl = convex->getWorkingList();
         for ( CollisionWorkingList* itr = wl.wLink.mNext; itr != &wl; itr = itr->wLink.mNext ) 
         {
            if ( itr->mConvex->getType() == ForestConvexType )
            {
               ForestConvex *pConvex = static_cast<ForestConvex*>(itr->mConvex);

               if ( pConvex->mObject == this &&
                  pConvex->mForestItemKey == forestItem.getKey() &&
                  pConvex->hullId == j )
               {
                  cc = itr->mConvex;
                  break;
               }
            }
         }
         if (cc)
            continue;

         // Then we need to make one.
         ForestConvex *cp = new ForestConvex;
         mConvexList->registerObject(cp);
         convex->addToWorkingList(cp);
         cp->mObject          = this;
         cp->mForestItemKey   = forestItem.getKey();
         cp->mData            = data;
         cp->mScale           = forestItem.getScale();
         cp->hullId           = j;
         cp->box              = forestItem.getObjBox();
         cp->calculateTransform( forestItem.getTransform() );
      }
   }
}
Example #6
0
template <class DataIO, class T, class Vector>
void
DataIO_save_vector_raw(DataIO& dio, T*, const Vector& x, ByteSwap_false)
{
	if (!x.empty())
		dio.ensureWrite(&*x.begin(), sizeof(T) * x.size());
}
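The helper above writes the vector's contiguous storage in one call when no byte swapping is needed. A standalone illustration of the same raw-write idea with standard library types only (hypothetical names, not the DataIO API):

#include <cstdint>
#include <fstream>
#include <type_traits>
#include <vector>

template <class T>
void save_vector_raw(std::ofstream& out, const std::vector<T>& x)
{
	static_assert(std::is_trivially_copyable<T>::value, "raw write needs a trivially copyable T");
	if (!x.empty())
		out.write(reinterpret_cast<const char*>(x.data()), std::streamsize(sizeof(T) * x.size()));
}

int main()
{
	std::vector<std::uint32_t> v = {1, 2, 3};
	std::ofstream out("v.bin", std::ios::binary);
	save_vector_raw(out, v);
	return 0;
}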
Example #7
0
// A bounds check is considered redundant if it's dominated by another bounds
// check with the same length and the indexes differ by only a constant amount.
// In this case we eliminate the redundant bounds check and update the other one
// to cover the ranges of both checks.
//
// Bounds checks are added to a hash map and since the hash function ignores
// differences in constant offset, this offers a fast way to find redundant
// checks.
bool
ion::EliminateRedundantBoundsChecks(MIRGraph &graph)
{
    BoundsCheckMap checks;

    if (!checks.init())
        return false;

    // Stack for pre-order CFG traversal.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;

    // The index of the current block in the CFG traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }

    // Starting from each self-dominating block, traverse the CFG in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();

        // Add all immediate dominators to the front of the worklist.
        for (size_t i = 0; i < block->numImmediatelyDominatedBlocks(); i++) {
            if (!worklist.append(block->getImmediatelyDominatedBlock(i)))
                return false;
        }

        for (MDefinitionIterator iter(block); iter; ) {
            if (!iter->isBoundsCheck()) {
                iter++;
                continue;
            }

            MBoundsCheck *check = iter->toBoundsCheck();

            // Replace all uses of the bounds check with the actual index.
            // This is (a) necessary, because we can coalesce two different
            // bounds checks and would otherwise use the wrong index and
            // (b) helps register allocation. Note that this is safe since
            // no other pass after bounds check elimination moves instructions.
            check->replaceAllUsesWith(check->index());

            if (!check->isMovable()) {
                iter++;
                continue;
            }

            MBoundsCheck *dominating = FindDominatingBoundsCheck(checks, check, index);
            if (!dominating)
                return false;

            if (dominating == check) {
                // We didn't find a dominating bounds check.
                iter++;
                continue;
            }

            bool eliminated = false;
            if (!TryEliminateBoundsCheck(dominating, check, &eliminated))
                return false;

            if (eliminated)
                iter = check->block()->discardDefAt(iter);
            else
                iter++;
        }
        index++;
    }

    JS_ASSERT(index == graph.numBlocks());
    return true;
}
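To make the coalescing idea concrete, the sketch below uses an assumed, simplified representation (not MBoundsCheck): two checks against the same length whose indexes differ only by a constant, say i + 1 and i + 3, can be replaced by one check whose minimum/maximum offsets cover both.

#include <algorithm>
#include <cstdio>

struct Check { int minOffset, maxOffset; };     // offsets relative to the shared index

Check coalesce(const Check &a, const Check &b)
{
    return { std::min(a.minOffset, b.minOffset), std::max(a.maxOffset, b.maxOffset) };
}

int main()
{
    Check first  = {1, 1};                      // models a check on i + 1
    Check second = {3, 3};                      // models a check on i + 3
    Check merged = coalesce(first, second);     // one check covering i + 1 .. i + 3
    std::printf("merged offsets: [%d, %d]\n", merged.minOffset, merged.maxOffset);
    return 0;
}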
Example #8
0
int Dumpsys::main(int argc, char* const argv[]) {
    Vector<String16> services;
    Vector<String16> args;
    Vector<String16> skippedServices;
    bool showListOnly = false;
    bool skipServices = false;
    int timeoutArg = 10;
    static struct option longOptions[] = {
        {"skip", no_argument, 0,  0 },
        {"help", no_argument, 0,  0 },
        {     0,           0, 0,  0 }
    };

    // Must reset optind, otherwise subsequent calls will fail (wouldn't happen on main.cpp, but
    // happens on test cases).
    optind = 1;
    while (1) {
        int c;
        int optionIndex = 0;

        c = getopt_long(argc, argv, "+t:l", longOptions, &optionIndex);

        if (c == -1) {
            break;
        }

        switch (c) {
        case 0:
            if (!strcmp(longOptions[optionIndex].name, "skip")) {
                skipServices = true;
            } else if (!strcmp(longOptions[optionIndex].name, "help")) {
                usage();
                return 0;
            }
            break;

        case 't':
            {
                char *endptr;
                timeoutArg = strtol(optarg, &endptr, 10);
                if (*endptr != '\0' || timeoutArg <= 0) {
                    fprintf(stderr, "Error: invalid timeout number: '%s'\n", optarg);
                    return -1;
                }
            }
            break;

        case 'l':
            showListOnly = true;
            break;

        default:
            fprintf(stderr, "\n");
            usage();
            return -1;
        }
    }

    for (int i = optind; i < argc; i++) {
        if (skipServices) {
            skippedServices.add(String16(argv[i]));
        } else {
            if (i == optind) {
                services.add(String16(argv[i]));
            } else {
                args.add(String16(argv[i]));
            }
        }
    }

    if ((skipServices && skippedServices.empty()) ||
            (showListOnly && (!services.empty() || !skippedServices.empty()))) {
        usage();
        return -1;
    }

    if (services.empty() || showListOnly) {
        // gets all services
        services = sm_->listServices();
        services.sort(sort_func);
        args.add(String16("-a"));
    }

    const size_t N = services.size();

    if (N > 1) {
        // first print a list of the current services
        aout << "Currently running services:" << endl;

        for (size_t i=0; i<N; i++) {
            sp<IBinder> service = sm_->checkService(services[i]);

            if (service != nullptr) {
                bool skipped = IsSkipped(skippedServices, services[i]);
                aout << "  " << services[i] << (skipped ? " (skipped)" : "") << endl;
            }
        }
    }

    if (showListOnly) {
        return 0;
    }

    for (size_t i = 0; i < N; i++) {
        const String16& service_name = services[i];
        if (IsSkipped(skippedServices, service_name)) continue;

        sp<IBinder> service = sm_->checkService(service_name);
        if (service != nullptr) {
            int sfd[2];

            if (pipe(sfd) != 0) {
                aerr << "Failed to create pipe to dump service info for " << service_name
                     << ": " << strerror(errno) << endl;
                continue;
            }

            unique_fd local_end(sfd[0]);
            unique_fd remote_end(sfd[1]);
            sfd[0] = sfd[1] = -1;

            if (N > 1) {
                aout << "------------------------------------------------------------"
                        "-------------------" << endl;
                aout << "DUMP OF SERVICE " << service_name << ":" << endl;
            }

            // dump blocks until completion, so spawn a thread..
            std::thread dump_thread([=, remote_end { std::move(remote_end) }]() mutable {
                int err = service->dump(remote_end.get(), args);

                // It'd be nice to be able to close the remote end of the socketpair before the dump
                // call returns, to terminate our reads if the other end closes their copy of the
                // file descriptor, but then hangs for some reason. There doesn't seem to be a good
                // way to do this, though.
                remote_end.reset();

                if (err != 0) {
                    aerr << "Error dumping service info: (" << strerror(err) << ") " << service_name
                         << endl;
                }
            });

            auto timeout = std::chrono::seconds(timeoutArg);
            auto start = std::chrono::steady_clock::now();
            auto end = start + timeout;

            struct pollfd pfd = {
                .fd = local_end.get(),
                .events = POLLIN
            };

            bool timed_out = false;
            bool error = false;
            while (true) {
                // Wrap this in a lambda so that TEMP_FAILURE_RETRY recalculates the timeout.
                auto time_left_ms = [end]() {
                    auto now = std::chrono::steady_clock::now();
                    auto diff = std::chrono::duration_cast<std::chrono::milliseconds>(end - now);
                    return std::max(diff.count(), 0ll);
                };

                int rc = TEMP_FAILURE_RETRY(poll(&pfd, 1, time_left_ms()));
                if (rc < 0) {
                    aerr << "Error in poll while dumping service " << service_name << " : "
                         << strerror(errno) << endl;
                    error = true;
                    break;
                } else if (rc == 0) {
                    timed_out = true;
                    break;
                }

                char buf[4096];
                rc = TEMP_FAILURE_RETRY(read(local_end.get(), buf, sizeof(buf)));
                if (rc < 0) {
                    aerr << "Failed to read while dumping service " << service_name << ": "
                         << strerror(errno) << endl;
                    error = true;
                    break;
                } else if (rc == 0) {
                    // EOF.
                    break;
                }

                if (!WriteFully(STDOUT_FILENO, buf, rc)) {
                    aerr << "Failed to write while dumping service " << service_name << ": "
                         << strerror(errno) << endl;
                    error = true;
                    break;
                }
            }

            if (timed_out) {
                aout << endl
                     << "*** SERVICE '" << service_name << "' DUMP TIMEOUT (" << timeoutArg
                     << "s) EXPIRED ***" << endl
                     << endl;
            }

            if (timed_out || error) {
                dump_thread.detach();
            } else {
                dump_thread.join();
            }

            if (N > 1) {
              std::chrono::duration<double> elapsed_seconds =
                  std::chrono::steady_clock::now() - start;
              aout << StringPrintf("--------- %.3fs ", elapsed_seconds.count()).c_str()
                   << "was the duration of dumpsys " << service_name;

              using std::chrono::system_clock;
              const auto finish = system_clock::to_time_t(system_clock::now());
              std::tm finish_tm;
              localtime_r(&finish, &finish_tm);
              aout << ", ending at: " << std::put_time(&finish_tm, "%Y-%m-%d %H:%M:%S")
                   << endl;
            }
        } else {
            aerr << "Can't find service: " << service_name << endl;
        }
    }

    return 0;
}
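The dump loop above polls a pipe and recomputes the remaining time on every iteration so that interrupted or partial reads do not extend the overall deadline. A reduced, POSIX-only sketch of that loop with a hypothetical file descriptor (not the dumpsys plumbing):

#include <poll.h>
#include <unistd.h>
#include <algorithm>
#include <chrono>
#include <cstdio>

static bool drainWithDeadline(int fd, std::chrono::seconds timeout) {
    auto end = std::chrono::steady_clock::now() + timeout;
    struct pollfd pfd = { fd, POLLIN, 0 };
    char buf[4096];
    while (true) {
        auto left = std::chrono::duration_cast<std::chrono::milliseconds>(
                end - std::chrono::steady_clock::now()).count();
        int rc = poll(&pfd, 1, int(std::max<long long>(left, 0)));
        if (rc <= 0) return false;              // poll error or deadline expired
        ssize_t n = read(fd, buf, sizeof(buf));
        if (n < 0) return false;                // read error
        if (n == 0) return true;                // EOF: the writer has finished
        fwrite(buf, 1, size_t(n), stdout);
    }
}

int main() {
    return drainWithDeadline(STDIN_FILENO, std::chrono::seconds(5)) ? 0 : 1;
}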
			void validate_parameters()
			{
				cpu_nthread = max(1, cpu_nthread);
				gpu_nstream = max(1, gpu_nstream);
				nstream = (is_host())?cpu_nthread:gpu_nstream;

				if(!is_float() && !is_double())
					precision = eP_float;

				if(!is_host() && !is_device())
					device = e_host;

				if(!is_gpu_available())
				{
					device = e_host;
				}

				fp_seed = max(0, fp_seed);

				fp_nconf = (!is_frozen_phonon())?1:max(1, fp_nconf);

				fp_iconf_0 = (!is_frozen_phonon() || !fp_single_conf)?1:fp_single_conf;

				tm_nrot = max(1, tm_nrot);
				tm_irot = max(0, tm_irot);
				norm_Pos_3d(tm_u0);
				if(tm_rot_point_type == eRPT_geometric_center)
				{
					tm_p0 = Pos_3d<T>(atoms.x_mean, atoms.y_mean, atoms.z_mean);
				}

				if(is_tomography())
				{
					thickness_type = eTT_Whole_Specimen;
					if(potential_slicing == ePS_Planes)
					{
						potential_slicing = ePS_dz_Proj;
					}
				}

				islice = max(0, islice);

				gpu_device = max(0, gpu_device);

				if(isZero(Vrl))
				{
					Vrl = c_Vrl;
				}

				if(isZero(nR))
				{
					nR = c_nR;
				}

				dp_Shift = is_PED();

				if(!is_scanning())
				{
					scanning.set_default();
				}
				scanning.set_grid();

				lens.set_input_data(E_0, grid);

				det_cir.set_input_data(E_0);

				theta = set_incident_angle(theta);
				nrot = max(1, nrot);
				if(!is_PED_HCI())
				{
					nrot = 1;
				}
				//Set beam type
				if(is_user_define_wave())
				{
					beam_type = eBT_User_Define;
				}
				else if(is_convergent_beam_wave())
				{
					beam_type = eBT_Convergent;

					if(is_CBED_CBEI())
					{
						set_beam_position(cbe_fr.x0, cbe_fr.y0);
					}
					else if(is_EWFS_EWRS())
					{
						set_beam_position(ew_fr.x0, ew_fr.y0);
					}
				}
				else if(is_plane_wave())
				{
					beam_type = eBT_Plane_Wave;
				}

				if(is_EELS() || is_EFTEM())
				{
					coherent_contribution = false;
					interaction_model = multem::eESIM_Multislice;
				}

				if(is_EWFS_EWRS())
				{
					coherent_contribution = true;
				}

				slice_storage = false;
				if(is_PED_HCI() || is_EELS_EFTEM()|| is_ISTEM() ||(is_STEM() && !coherent_contribution))
				{
					slice_storage = true;
				}

				if(!is_multislice())
				{
					islice = 0;
					fp_dim.z = false;
					potential_slicing = ePS_Planes;
					thickness_type = eTT_Through_Thickness;
					slice_storage = slice_storage || !is_whole_specimen();
				}

				if(is_subslicing())
				{
					thickness_type = eTT_Whole_Specimen;
				}

				if(is_whole_specimen() || thickness.empty())
				{
					thickness_type = eTT_Whole_Specimen;
					thickness.resize(1);
					thickness[0] = atoms.z_max;
				}
				else if(is_through_thickness())
				{
					// for amorphous specimens this has to be modified
					thickness_type = eTT_Through_Thickness;
					std::sort(thickness.begin(), thickness.end());
					fp_dim.z = false;
					atoms.Sort_by_z();
					atoms.get_z_layer();			
					multem::match_vectors(atoms.z_layer.begin(), atoms.z_layer.end(), thickness);
				}
				else if(is_through_slices())
				{
					std::sort(thickness.begin(), thickness.end());
					atoms.Sort_by_z();
					atoms.get_z_layer();

					Vector<T, e_host> z_slice;
					atoms.get_z_slice(potential_slicing, grid.dz, atoms, z_slice);
					vector<T> z_slicet;
					z_slicet.assign(z_slice.begin(), z_slice.end());
					multem::match_vectors(z_slice.begin()+1, z_slice.end(), thickness);
				}
			}
Example #10
0
//------------------------------------------------------------------------------
//!
void
Puppeteer::resolveConstraints(
   Skeleton::Instance&                   skelInst,
   const Vector< PositionalConstraint >& constraints
)
{
   if( constraints.empty() ) return;

   Skeleton* skeleton = skelInst.skeleton();

   // Compute global root position.
   Vec3f root = skelInst.globalPosition(0);

   // 1. Transform constraints into end effector constraints.
   for( uint i = 0; i < constraints.size(); ++i )
   {
      int cID              = constraints[i].id();
      Skeleton::Limb& limb = skeleton->limbFromBone( cID );
      int eID              = limb.endEffectorID();

      Vec3f epos = skelInst.globalReferential( eID ).toMatrix() * skeleton->bone( eID ).endPoint();
      constraints[i]._currentPos     = epos;
      constraints[i]._endEffectorPos = constraints[i]._position;
      if( cID != eID )
      {
         Vec3f cpos = skelInst.globalReferential( cID ).toMatrix() * skeleton->bone( cID ).endPoint();
         constraints[i]._endEffectorPos -= cpos-epos;
      }

      Vec3f delta = root - skelInst.globalPosition( limb.boneID() );
      constraints[i]._sphere.center( constraints[i]._endEffectorPos + delta );
      constraints[i]._sphere.radius( limb.reachRadius()/constraints[i].weight() );
   }

   // 2. Compute an approximate root position.
   Vec3f dRoot(0.0f);
   float weights   = 0.0f;
   float maxWeight = 0.0f;
   for( uint i = 0; i < constraints.size(); ++i )
   {
      dRoot   += constraints[i].weight() * (constraints[i]._endEffectorPos-constraints[i]._currentPos);
      weights += constraints[i].weight();
      if( constraints[i].weight() > maxWeight )
      {
         maxWeight = constraints[i].weight();
      }
   }
   dRoot *= (maxWeight/weights)*0.8f;
   root  += dRoot;


   // 3. Adjust root position to satisfy all constraints.
   adjustRoot( constraints, root, dRoot );

   // 4. Adjust limbs positions.
   for( uint i = 0; i < constraints.size(); ++i )
   {
      adjustLimb( skelInst, constraints[i], dRoot );
   }

   // 5. Adjust skeleton instance.
   skelInst.offset().position() += dRoot;
   skelInst.updateGlobalTransforms();

   // TODO: Adjust end of limbs...
}
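Step 2 above damps the root correction: the end-effector errors are averaged by weight, then rescaled by the ratio of the largest individual weight to the total weight and a 0.8 factor, so no single constraint is fully satisfied at the expense of the others. A one-axis numeric sketch with assumed weights and errors (simplified types, not the engine's Vec3f):

#include <cstdio>

int main()
{
   const int n = 2;
   const double weight[n] = { 1.0, 3.0 };
   const double error[n]  = { 0.2, -0.1 };   // desired minus current position, one axis only
   double dRoot = 0.0, weights = 0.0, maxWeight = 0.0;
   for( int i = 0; i < n; ++i )
   {
      dRoot   += weight[i]*error[i];
      weights += weight[i];
      if( weight[i] > maxWeight ) maxWeight = weight[i];
   }
   dRoot *= (maxWeight/weights)*0.8;
   std::printf( "dRoot = %f\n", dRoot );
   return 0;
}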
Example #11
0
bool PhysicsInterface::convertImageAlphaTo2DPolygons(const Image& image, Vector<Vector<Vec2>>& outPolygons,
                                                     bool flipHorizontally, bool flipVertically)
{
    // Check image is valid
    if (!image.isValid2DImage())
    {
        LOG_ERROR << "The passed image is not a valid 2D image: " << image;
        return false;
    }

    auto bitmap = Bitmap(image.getWidth(), image.getHeight());

    // Setup bitmap contents
    auto width = int(image.getWidth());
    for (auto i = 0U; i < bitmap.data.size(); i++)
        bitmap.set(i % width, i / width, image.getPixelColor(i % width, i / width).a > 0.5f);

    // Find all the edge pixels
    auto edgePixels = Vector<PolygonVertex>();
    for (auto y = 0; y < bitmap.height; y++)
    {
        for (auto x = 0; x < bitmap.width; x++)
        {
            if (bitmap.get(x, y) && (!bitmap.get(x - 1, y - 1) || !bitmap.get(x - 1, y) || !bitmap.get(x - 1, y + 1) ||
                                     !bitmap.get(x, y - 1) || !bitmap.get(x, y + 1) || !bitmap.get(x + 1, y - 1) ||
                                     !bitmap.get(x + 1, y) || !bitmap.get(x + 1, y + 1)))
                edgePixels.emplace(x, y);
        }
    }

    while (!edgePixels.empty())
    {
        // Start the next polygon at an unused edge pixel
        auto polygon = Vector<PolygonVertex>(1, edgePixels.popBack());

        // Each pixel that is put onto the polygon can be backtracked if it leads to a dead end; this
        // fixes problems with pointy angles that can cause the edge walking to get stuck.
        auto hasBacktracked = false;

        while (true)
        {
            // Continue building this polygon by finding the next adjacent edge pixel
            auto adjacentPixel = 0U;
            for (; adjacentPixel < edgePixels.size(); adjacentPixel++)
            {
                if (bitmap.isAdjacent(polygon.back(), edgePixels[adjacentPixel]))
                    break;
            }

            // If there was no adjacent edge pixel then this polygon is malformed, so skip it and keep trying to build
            // more
            if (adjacentPixel == edgePixels.size())
            {
                if (!hasBacktracked)
                {
                    polygon.popBack();
                    hasBacktracked = true;

                    if (polygon.empty())
                        break;

                    continue;
                }
                else
                    break;
            }

            // Add the adjacent edge pixel to this polygon
            polygon.append(edgePixels[adjacentPixel]);
            edgePixels.erase(adjacentPixel);
            hasBacktracked = false;

            // Check whether this polygon is now complete; at least 4 points are required for a valid polygon
            if (polygon.size() < 4 || !bitmap.isAdjacent(polygon[0], polygon.back()))
                continue;

            // Now that a complete polygon has been constructed it needs to be simplified down as much as possible while
            // retaining key features such as large straight edges and right angles

            // Simplify perfectly horizontal and vertical edges as much as possible
            for (auto i = 0; i < int(polygon.size()); i++)
            {
                const auto& a = polygon[i];
                const auto& b = polygon[(i + 1) % polygon.size()];
                const auto& c = polygon[(i + 2) % polygon.size()];

                if ((a.x == b.x && a.x == c.x) || (a.y == b.y && a.y == c.y))
                    polygon.erase((i-- + 1) % polygon.size());
            }

            // Identify horizontal and vertical edges that are on the outside edge of the bitmap and mark their vertices
            // as important
            for (auto i = 0U; i < polygon.size(); i++)
            {
                auto& a = polygon[i];
                auto& b = polygon[(i + 1) % polygon.size()];
                if ((a.x == 0 || a.x == int(image.getWidth() - 1) || a.y == 0 || a.y == int(image.getHeight() - 1)) &&
                    a.isAxialEdge(b))
                {
                    a.keep = true;
                    b.keep = true;
                }
            }

            // Identify axial right angles and flag the relevant vertices as important
            for (auto i = 0U; i < polygon.size(); i++)
            {
                const auto& a = polygon[i];
                const auto& c = polygon[(i + 2) % polygon.size()];
                auto& b = polygon[(i + 1) % polygon.size()];

                if (a.isAxialEdge(b) && b.isAxialEdge(c) && (a - b).isRightAngle(c - b))
                    b.keep = true;
            }

            // The ends of straight edges that are not part of a right angle shape are pulled inwards by inserting new
            // vertices one pixel apart, this allows the ends of straight edges to undergo subsequent simplification.
            // The 'body' of the straight edge is then flagged as important to avoid any further simplification, which
            // will preserve the straight edge in the final result.
            const auto straightEdgePullBackSize = straightEdgeLength / 3;
            for (auto i = 0U; i < polygon.size(); i++)
            {
                auto a = polygon[i];
                auto b = polygon[(i + 1) % polygon.size()];

                if (a.isAxialEdge(b))
                {
                    auto xSign = Math::getSign(b.x - a.x);
                    auto ySign = Math::getSign(b.y - a.y);

                    if (!a.keep)
                    {
                        for (auto j = 0U; j < straightEdgePullBackSize; j++)
                            polygon.insert(i++, PolygonVertex(a.x + xSign * (j + 1), a.y + (j + 1) * ySign));

                        polygon[i].keep = true;
                    }

                    if (!b.keep)
                    {
                        for (auto j = 0U; j < straightEdgePullBackSize; j++)
                        {
                            polygon.insert(i++, PolygonVertex(b.x - (straightEdgePullBackSize - j) * xSign,
                                                              b.y - (straightEdgePullBackSize - j) * ySign));
                        }
                        polygon[i - straightEdgePullBackSize + 1].keep = true;
                    }
                }
            }

            // This is the main simplification loop; it works by trying progressively
            // larger and larger simplifications on the polygon
            auto simplificationThreshold = 1.5f;
            while (polygon.size() > 3)
            {
                for (auto i = 0U; i < polygon.size(); i++)
                {
                    const auto& a = polygon[i];
                    const auto& b = polygon[(i + 1) % polygon.size()];
                    const auto& c = polygon[(i + 2) % polygon.size()];

                    // If b is important then don't try to get rid of it
                    if (b.keep)
                        continue;

                    // Get rid of point b if the line a-c is connected by an edge in the bitmap
                    if (a.distance(c) < simplificationThreshold && bitmap.arePixelsConnectedByEdge(a, c))
                        polygon.erase((i + 1) % polygon.size());
                }

                simplificationThreshold += 1.0f;
                if (simplificationThreshold >= std::max(image.getWidth(), image.getHeight()))
                    break;
            }

            if (polygon.size() < 3)
                break;

            outPolygons.enlarge(1);
            auto& outPolygon = outPolygons.back();

            // Scale to the range 0-1
            for (const auto& vertex : polygon)
                outPolygon.append(vertex.toVec2() / Vec2(float(image.getWidth() - 1), float(image.getHeight() - 1)));

            // Apply horizontal and vertical flips if requested
            if (flipHorizontally)
            {
                for (auto& vertex : outPolygon)
                    vertex.setXY(1.0f - vertex.x, vertex.y);
            }
            if (flipVertically)
            {
                for (auto& vertex : outPolygon)
                    vertex.setXY(vertex.x, 1.0f - vertex.y);
            }

            // Order vertices clockwise
            auto center = outPolygon.getAverage();
            if ((Vec3(outPolygon[0]) - center).cross(Vec3(outPolygon[1]) - center).z > 0.0f)
                outPolygon.reverse();

            break;
        }
    }

    return !outPolygons.empty();
}
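The edge-pixel test at the top of the function treats a solid pixel as an edge pixel when any of its eight neighbours is empty or outside the image. A compact sketch of that test on a plain grid of bools (hypothetical data, not the Bitmap helper):

#include <cstdio>
#include <vector>

int main()
{
    const int w = 4, h = 3;
    std::vector<bool> grid = { false, false, false, false,
                               false, true,  true,  false,
                               false, false, false, false };

    auto solid = [&](int x, int y) {
        return x >= 0 && y >= 0 && x < w && y < h && grid[size_t(y * w + x)];
    };

    for (auto y = 0; y < h; y++)
    {
        for (auto x = 0; x < w; x++)
        {
            if (!solid(x, y))
                continue;

            auto edge = false;
            for (auto dy = -1; dy <= 1 && !edge; dy++)
                for (auto dx = -1; dx <= 1 && !edge; dx++)
                    if ((dx || dy) && !solid(x + dx, y + dy))
                        edge = true;

            if (edge)
                std::printf("edge pixel at (%d, %d)\n", x, y);
        }
    }

    return 0;
}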
Example #12
0
EditorFileSystem::DirItem* EditorFileSystem::_scan_dir(DirAccess *da,Set<String> &extensions,String p_name,float p_from,float p_range,const String& p_path,HashMap<String,FileCache> &file_cache,HashMap<String,DirCache> &dir_cache,EditorProgressBG& p_prog) {

	if (abort_scan)
		return NULL;

	if (p_path!=String()) {
		if (FileAccess::exists(("res://"+p_path).plus_file("engine.cfg"))) {
			return NULL;
		}
	}

	List<String> dirs;
	List<String> files;
	Set<String> pngs;

	String path=p_path;
	if (path.ends_with("/"))
		path=path.substr(0,path.length()-1);
	String global_path = Globals::get_singleton()->get_resource_path().plus_file(path);

	path="res://"+path;
	uint64_t mtime = FileAccess::get_modified_time(global_path);

	DirCache *dc = dir_cache.getptr(path);


	if (false && dc && dc->modification_time==mtime) {
		//use the cached files, since directory did not change
		for (Set<String>::Element *E=dc->subdirs.front();E;E=E->next()) {
			dirs.push_back(E->get());
		}
		for (Set<String>::Element *E=dc->files.front();E;E=E->next()) {
			files.push_back(E->get());
		}

	} else {
		//use the filesystem, some files may have changed
		Error err = da->change_dir(global_path);
		if (err!=OK) {
			print_line("Can't change to: "+path);
			ERR_FAIL_COND_V(err!=OK,NULL);
		}


		da->list_dir_begin();
		while (true) {

			bool isdir;
			String f = da->get_next(&isdir);
			if (f=="")
				break;
			if (isdir) {
				dirs.push_back(f);
			} else {
				String ext = f.extension().to_lower();
				if (extensions.has(ext))
					files.push_back(f);

			}

		}

		da->list_dir_end();
		files.sort();
		dirs.sort();

	}



	//print_line(da->get_current_dir()+": dirs: "+itos(dirs.size())+" files:"+itos(files.size()) );

	//find subdirs
	Vector<DirItem*> subdirs;

	//String current = da->get_current_dir();
	float idx=0;
	for (List<String>::Element *E=dirs.front();E;E=E->next(),idx+=1.0) {

		String d = E->get();
		if (d.begins_with(".")) //ignore hidden and . / ..
			continue;

		//ERR_CONTINUE( da->change_dir(d)!= OK );
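		// Each subdirectory gets an equal slice of this directory's progress range:
		// it starts at p_from + (idx/dirs.size())*p_range and spans p_range/dirs.size().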
		DirItem *sdi = _scan_dir(da,extensions,d,p_from+(idx/dirs.size())*p_range,p_range/dirs.size(),p_path+d+"/",file_cache,dir_cache,p_prog);
		if (sdi) {
			subdirs.push_back(sdi);
		}
		//da->change_dir(current);
	}


	if (subdirs.empty() && files.empty()) {
		total=p_from+p_range;
		p_prog.step(total*100);
		return NULL; //give up, nothing to do here
	}

	DirItem *di = memnew( DirItem );
	di->path=path;
	di->name=p_name;
	di->dirs=subdirs;
	di->modified_time=mtime;

	//add files
	for (List<String>::Element *E=files.front();E;E=E->next()) {

		SceneItem * si = memnew( SceneItem );
		si->file=E->get();
		si->path="res://"+p_path+si->file;
		FileCache *fc = file_cache.getptr(si->path);
		uint64_t mt = FileAccess::get_modified_time(si->path);

		if (fc && fc->modification_time == mt) {

			si->meta=fc->meta;
			si->type=fc->type;
			si->modified_time=fc->modification_time;
		} else {
			si->meta=_get_meta(si->path);
			si->type=ResourceLoader::get_resource_type(si->path);
			si->modified_time=mt;

		}

		if (si->meta.enabled) {
			md_count++;
			if (_check_meta_sources(si->meta)) {
				sources_changed.push_back(si->path);
			}
		}
		di->files.push_back(si);
	}

	total=p_from+p_range;
	p_prog.step(total*100);

	return di;
}
Example #13
0
void FileSystemDock::_file_option(int p_option) {

	switch(p_option) {


		case FILE_SHOW_IN_EXPLORER:
		case FILE_OPEN: {
			int idx=-1;
			for(int i=0;i<files->get_item_count();i++) {
				if (files->is_selected(i)) {
					idx=i;
					break;
				}
			}

			if (idx<0)
				return;



			String path = files->get_item_metadata(idx);
			if (p_option == FILE_SHOW_IN_EXPLORER) {
				String dir = Globals::get_singleton()->globalize_path(path);
				dir = dir.substr(0, dir.find_last("/"));
				OS::get_singleton()->shell_open(String("file://")+dir);
				return;
			}

			if (path.ends_with("/")) {
				if (path!="res://") {
					path=path.substr(0,path.length()-1);
				}
				this->path=path;
				_update_files(false);
				current_path->set_text(path);
				_push_to_history();
			} else {

				if (ResourceLoader::get_resource_type(path)=="PackedScene") {

					editor->open_request(path);
				} else {

					editor->load_resource(path);
				}
			}
		} break;
		case FILE_INSTANCE: {

			Vector<String> paths;

			for (int i = 0; i<files->get_item_count(); i++) {
				if (!files->is_selected(i))
					continue;
				String path =files->get_item_metadata(i);
				if (EditorFileSystem::get_singleton()->get_file_type(path)=="PackedScene") {
					paths.push_back(path);
				}
			}

			if (!paths.empty()) {
				emit_signal("instance", paths);
			}
		} break;
		case FILE_DEPENDENCIES: {

			int idx = files->get_current();
			if (idx<0 || idx>=files->get_item_count())
				break;
			String path = files->get_item_metadata(idx);
			deps_editor->edit(path);
		} break;
		case FILE_OWNERS: {

			int idx = files->get_current();
			if (idx<0 || idx>=files->get_item_count())
				break;
			String path = files->get_item_metadata(idx);
			owners_editor->show(path);
		} break;
		case FILE_MOVE: {

			move_dirs.clear();
			move_files.clear();

			for(int i=0;i<files->get_item_count();i++) {

				String path = files->get_item_metadata(i);
				if (!files->is_selected(i))
					continue;

				 if (files->get_item_text(i)=="..") {
					 EditorNode::get_singleton()->show_warning(TTR("Can't operate on '..'"));
					 return;
				 }

				if (path.ends_with("/")) {
					move_dirs.push_back(path.substr(0,path.length()-1));
				} else {
					move_files.push_back(path);
				}
			}


			if (move_dirs.empty() && move_files.size()==1) {

				rename_dialog->clear_filters();
				rename_dialog->add_filter("*."+move_files[0].extension());
				rename_dialog->set_mode(EditorFileDialog::MODE_SAVE_FILE);
				rename_dialog->set_current_path(move_files[0]);
				rename_dialog->popup_centered_ratio();
				rename_dialog->set_title(TTR("Pick New Name and Location For:")+" "+move_files[0].get_file());


			} else {
				//just move
				move_dialog->popup_centered_ratio();
			}


		} break;
		case FILE_REMOVE: {

			Vector<String> torem;

			for(int i=0;i<files->get_item_count();i++) {

				String path = files->get_item_metadata(i);
				if (path.ends_with("/") || !files->is_selected(i))
					continue;
				torem.push_back(path);

			}

			if (torem.empty()) {
				EditorNode::get_singleton()->show_warning(TTR("No files selected!"));
				break;
			}

			remove_dialog->show(torem);
			//1) find if used
			//2) warn

		} break;
		case FILE_INFO: {

		} break;
		case FILE_REIMPORT: {


			Vector<String> reimport;
			for(int i=0;i<files->get_item_count();i++) {

				if (!files->is_selected(i))
					continue;

				String path = files->get_item_metadata(i);
				reimport.push_back(path);
			}

			ERR_FAIL_COND(reimport.size()==0);

			Ref<ResourceImportMetadata> rimd = ResourceLoader::load_import_metadata(reimport[0]);
			ERR_FAIL_COND(!rimd.is_valid());
			String editor=rimd->get_editor();

			if (editor.begins_with("texture_")) { //compatibility fix for old texture format
				editor="texture";
			}

			Ref<EditorImportPlugin> rimp = EditorImportExport::get_singleton()->get_import_plugin_by_name(editor);
			ERR_FAIL_COND(!rimp.is_valid());

			if (reimport.size()==1) {
				rimp->import_dialog(reimport[0]);
			} else {
				rimp->reimport_multiple_files(reimport);

			}

		} break;
		case FILE_COPY_PATH: {

			int idx = files->get_current();
			if (idx<0 || idx>=files->get_item_count())
				break;
			String path = files->get_item_metadata(idx);
			OS::get_singleton()->set_clipboard(path);
		} break;
	}
}
Example #14
0
void Forest::prepRenderImage( SceneRenderState *state )
{
    PROFILE_SCOPE(Forest_RenderCells);

    // TODO: Fix stats.
    /*
    ForestCellVector &theCells = mData->getCells();
    smTotalCells += theCells.size();

    // Don't render if we don't have a grid!
    if ( theCells.empty() )
       return false;
    */

    // Prepare to render.
    GFXTransformSaver saver;

    // Figure out the grid range in the viewing area.
    const bool isReflectPass = state->isReflectPass();

    const F32 cullScale = isReflectPass ? mReflectionLodScalar : 1.0f;

    // If we need to update our cached
    // zone state then do it now.
    if ( mZoningDirty )
    {
        mZoningDirty = false;

        Vector<ForestCell*> cells;
        mData->getCells(  &cells );
        for ( U32 i=0; i < cells.size(); i++ )
            cells[i]->_updateZoning( getSceneManager()->getZoneManager() );
    }

    // TODO: Move these into the TSForestItemData as something we
    // set up once and don't do per-instance.

    // Set up the TS render state.
    TSRenderState rdata;
    rdata.setSceneState( state );

    // Use origin sort on all forest elements as
    // it's a lot cheaper than the bounds sort.
    rdata.setOriginSort( true );

    // We may have some forward lit materials in
    // the forest, so pass down a LightQuery for it.
    LightQuery lightQuery;
    rdata.setLightQuery( &lightQuery );
    Frustum culler = state->getFrustum();

    // Adjust the far distance if the cull scale has changed.
    if ( !mIsEqual( cullScale, 1.0f ) )
    {
        const F32 visFarDist = culler.getFarDist() * cullScale;
        culler.setFarDist( visFarDist );
    }

    Box3F worldBox;

    // Used for debug drawing.
    GFXDrawUtil* drawer = GFX->getDrawUtil();
    drawer->clearBitmapModulation();

    // Go thru the visible cells.
    const Box3F &cullerBounds = culler.getBounds();
    const Point3F &camPos = state->getDiffuseCameraPosition();

    U32 clipMask;
    smAverageItemsPerCell = 0.0f;
    U32 cellsProcessed = 0;
    ForestCell *cell;

    // First get all the top level cells which
    // intersect the frustum.
    Vector<ForestCell*> cellStack;
    mData->getCells( culler, &cellStack );

    // Get the culling zone state.
    const BitVector &zoneState = state->getCullingState().getZoneVisibilityFlags();

    // Now loop till we run out of cells.
    while ( !cellStack.empty() )
    {
        // Pop off the next cell.
        cell = cellStack.last();
        cellStack.pop_back();

        const Box3F &cellBounds = cell->getBounds();

        // If the cell is empty or its bounds are outside the frustum
        // bounds then we have nothing more to do.
        if ( cell->isEmpty() || !cullerBounds.isOverlapped( cellBounds ) )
            continue;

        // Can we cull this cell entirely?
        clipMask = culler.testPlanes( cellBounds, Frustum::PlaneMaskAll );
        if ( clipMask == -1 )
            continue;

        // Test cell visibility for interior zones.
        const bool visibleInside = !cell->getZoneOverlap().empty() ? zoneState.testAny( cell->getZoneOverlap() ) : false;

        // Test cell visibility for outdoor zone, but only
        // if we need to.
        bool visibleOutside = false;
        if( !cell->mIsInteriorOnly && !visibleInside )
        {
            U32 outdoorZone = SceneZoneSpaceManager::RootZoneId;
            visibleOutside = !state->getCullingState().isCulled( cellBounds, &outdoorZone, 1 );
        }

        // Skip cell if neither visible indoors nor outdoors.
        if( !visibleInside && !visibleOutside )
            continue;

        // Update the stats.
        smAverageItemsPerCell += cell->getItems().size();
        ++cellsProcessed;
        //if ( cell->isLeaf() )
        //++leafCellsProcessed;

        // Get the distance from the camera to the cell bounds.
        F32 dist = cellBounds.getDistanceToPoint( camPos );

        // If the largest item in the cell can be billboarded
        // at the cell distance to the camera... then the whole
        // cell can be billboarded.
        //
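        // For example, if even the largest tree in the cell would already be drawn as an
        // imposter at this distance, every smaller item would be as well, so the entire
        // cell can take the batched imposter path below.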
        if (  smForceImposters ||
                ( dist > 0.0f && cell->getLargestItem().canBillboard( state, dist ) ) )
        {
            // If imposters are disabled then skip out.
            if ( smDisableImposters )
                continue;

            PROFILE_SCOPE(Forest_RenderBatches);

            // Keep track of how many cells were batched.
            ++smCellsBatched;

            // Ok... everything in this cell should be batched.  First
            // create the batches if we don't have any.
            if ( !cell->hasBatches() )
                cell->buildBatches();

            //if ( drawCells )
            //mCellRenderFlag[ cellIter - theCells.begin() ] = 1;

            // TODO: Light queries for batches?

            // Now render the batches... we pass the culler if the
            // cell wasn't fully visible so that each batch can be culled.
            smCellItemsBatched += cell->renderBatches( state, clipMask != 0 ? &culler : NULL );
            continue;
        }

        // If this isn't a leaf then recurse.
        if ( !cell->isLeaf() )
        {
            cell->getChildren( &cellStack );
            continue;
        }

        // This cell has mixed billboards and mesh based items.
        ++smCellsRendered;

        PROFILE_SCOPE(Forest_RenderItems);

        //if ( drawCells )
        //mCellRenderFlag[ cellIter - theCells.begin() ] = 2;

        // Use the cell bounds as the light query volume.
        //
        // This means all forward lit items in this cell will
        // get the same lights, but it performs much better.
        lightQuery.init( cellBounds );

        // This cell is visible... have it render its items.
        smCellItemsRendered += cell->render( &rdata, clipMask != 0 ? &culler : NULL );
    }

    // Keep track of the average items per cell.
    if ( cellsProcessed > 0 )
        smAverageItemsPerCell /= (F32)cellsProcessed;

    // Got debug drawing to do?
    if ( smDrawCells && state->isDiffusePass() )
    {
        ObjectRenderInst *ri = state->getRenderPass()->allocInst<ObjectRenderInst>();
        ri->renderDelegate.bind( this, &Forest::_renderCellBounds );
        ri->type = RenderPassManager::RIT_Editor;
        state->getRenderPass()->addInst( ri );
    }
}
void TSLastDetail::_update()
{
   // We're gonna render... make sure we can.
   bool sceneBegun = GFX->canCurrentlyRender();
   if ( !sceneBegun )
      GFX->beginScene();

   _validateDim();

   Vector<GBitmap*> bitmaps;
   Vector<GBitmap*> normalmaps;

   // We need to create our own instance to render with.
   TSShapeInstance *shape = new TSShapeInstance( mShape, true );

   // Animate the shape once.
   shape->animate( mDl );

   // So we don't have to change it everywhere.
   const GFXFormat format = GFXFormatR8G8B8A8;  

   S32 imposterCount = ( ((2*mNumPolarSteps) + 1 ) * mNumEquatorSteps ) + ( mIncludePoles ? 2 : 0 );
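   // For instance (illustrative numbers), 2 polar steps and 8 equator steps with poles
   // included gives (2*2 + 1) * 8 + 2 = 42 imposter snapshots.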

   // Figure out the optimal texture size.
   Point2I texSize( smMaxTexSize, smMaxTexSize );
   while ( true )
   {
      Point2I halfSize( texSize.x / 2, texSize.y / 2 );
      U32 count = ( halfSize.x / mDim ) * ( halfSize.y / mDim );
      if ( count < imposterCount )
      {
         // Try half of the height.
         count = ( texSize.x / mDim ) * ( halfSize.y / mDim );
         if ( count >= imposterCount )
            texSize.y = halfSize.y;
         break;
      }

      texSize = halfSize;
   }
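   // For instance (illustrative numbers), with smMaxTexSize = 2048, mDim = 64 and 42
   // imposters: 1024x1024 and 512x512 still fit (256 and 64 cells), 256x256 does not
   // (16 cells), and 512x256 holds only 32, so the loop settles on a 512x512 sheet.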

   GBitmap *imposter = NULL;
   GBitmap *normalmap = NULL;
   GBitmap destBmp( texSize.x, texSize.y, true, format );
   GBitmap destNormal( texSize.x, texSize.y, true, format );

   U32 mipLevels = destBmp.getNumMipLevels();

   ImposterCapture *imposterCap = new ImposterCapture();

   F32 equatorStepSize = M_2PI_F / (F32)mNumEquatorSteps;

   static const MatrixF topXfm( EulerF( -M_PI_F / 2.0f, 0, 0 ) );
   static const MatrixF bottomXfm( EulerF( M_PI_F / 2.0f, 0, 0 ) );

   MatrixF angMat;

   F32 polarStepSize = 0.0f;
   if ( mNumPolarSteps > 0 )
      polarStepSize = -( 0.5f * M_PI_F - mDegToRad( mPolarAngle ) ) / (F32)mNumPolarSteps;

   PROFILE_START(TSLastDetail_snapshots);

   S32 currDim = mDim;
   for ( S32 mip = 0; mip < mipLevels; mip++ )
   {
      if ( currDim < 1 )
         currDim = 1;
      
      dMemset( destBmp.getWritableBits(mip), 0, destBmp.getWidth(mip) * destBmp.getHeight(mip) * GFXFormat_getByteSize( format ) );
      dMemset( destNormal.getWritableBits(mip), 0, destNormal.getWidth(mip) * destNormal.getHeight(mip) * GFXFormat_getByteSize( format ) );

      bitmaps.clear();
      normalmaps.clear();

      F32 rotX = 0.0f;
      if ( mNumPolarSteps > 0 )
         rotX = -( mDegToRad( mPolarAngle ) - 0.5f * M_PI_F );

      // We capture the images in a particular order which must
      // match the order expected by the imposter renderer.

      imposterCap->begin( shape, mDl, currDim, mRadius, mCenter );

      for ( U32 j=0; j < (2 * mNumPolarSteps + 1); j++ )
      {
         F32 rotZ = -M_PI_F / 2.0f;

         for ( U32 k=0; k < mNumEquatorSteps; k++ )
         {            
            angMat.mul( MatrixF( EulerF( rotX, 0, 0 ) ),
                        MatrixF( EulerF( 0, 0, rotZ ) ) );

            imposterCap->capture( angMat, &imposter, &normalmap );

            bitmaps.push_back( imposter );
            normalmaps.push_back( normalmap );

            rotZ += equatorStepSize;
         }

         rotX += polarStepSize;

         if ( mIncludePoles )
         {
            imposterCap->capture( topXfm, &imposter, &normalmap );

            bitmaps.push_back(imposter);
            normalmaps.push_back( normalmap );

            imposterCap->capture( bottomXfm, &imposter, &normalmap );

            bitmaps.push_back( imposter );
            normalmaps.push_back( normalmap );
         }         
      }

      imposterCap->end();

      Point2I texSize( destBmp.getWidth(mip), destBmp.getHeight(mip) );

      // Ok... pack in bitmaps till we run out.
      for ( S32 y=0; y+currDim <= texSize.y; )
      {
         for ( S32 x=0; x+currDim <= texSize.x; )
         {
            // Copy the next bitmap to the dest texture.
            GBitmap* bmp = bitmaps.first();
            bitmaps.pop_front();
            destBmp.copyRect( bmp, RectI( 0, 0, currDim, currDim ), Point2I( x, y ), 0, mip );
            delete bmp;

            // Copy the next normal to the dest texture.
            GBitmap* normalmap = normalmaps.first();
            normalmaps.pop_front();
            destNormal.copyRect( normalmap, RectI( 0, 0, currDim, currDim ), Point2I( x, y ), 0, mip );
            delete normalmap;

            // Did we finish?
            if ( bitmaps.empty() )
               break;

            x += currDim;
         }

         // Did we finish?
         if ( bitmaps.empty() )
            break;

         y += currDim;
      }

      // Next mip...
      currDim /= 2;
   }

   PROFILE_END(); // TSLastDetail_snapshots

   delete imposterCap;
   delete shape;   
   
   
   // Should we dump the images?
   if ( Con::getBoolVariable( "$TSLastDetail::dumpImposters", false ) )
   {
      String imposterPath = mCachePath + ".imposter.png";
      String normalsPath = mCachePath + ".imposter_normals.png";

      FileStream stream;
      if ( stream.open( imposterPath, Torque::FS::File::Write  ) )
         destBmp.writeBitmap( "png", stream );
      stream.close();

      if ( stream.open( normalsPath, Torque::FS::File::Write ) )
         destNormal.writeBitmap( "png", stream );
      stream.close();
   }

   // DEBUG: Some code to force usage of a test image.
   //GBitmap* tempMap = GBitmap::load( "./forest/data/test1234.png" );
   //tempMap->extrudeMipLevels();
   //mTexture.set( tempMap, &GFXDefaultStaticDiffuseProfile, false );
   //delete tempMap;

   DDSFile *ddsDest = DDSFile::createDDSFileFromGBitmap( &destBmp );
   DDSUtil::squishDDS( ddsDest, GFXFormatDXT3 );

   DDSFile *ddsNormals = DDSFile::createDDSFileFromGBitmap( &destNormal );
   DDSUtil::squishDDS( ddsNormals, GFXFormatDXT5 );

   // Finally save the imposters to disk.
   FileStream fs;
   if ( fs.open( _getDiffuseMapPath(), Torque::FS::File::Write ) )
   {
      ddsDest->write( fs );
      fs.close();
   }
   if ( fs.open( _getNormalMapPath(), Torque::FS::File::Write ) )
   {
      ddsNormals->write( fs );
      fs.close();
   }

   delete ddsDest;
   delete ddsNormals;

   // If we did a begin then end it now.
   if ( !sceneBegun )
      GFX->endScene();
}
Example #16
0
void ConvexShape::Geometry::generate( const Vector< PlaneF > &planes, const Vector< Point3F > &tangents )
{
   PROFILE_SCOPE( Geometry_generate );

   points.clear();
   faces.clear();	

   AssertFatal( planes.size() == tangents.size(), "ConvexShape - incorrect plane/tangent count." );

#ifdef TORQUE_ENABLE_ASSERTS
   for ( S32 i = 0; i < planes.size(); i++ )
   {
      F32 dt = mDot( planes[i], tangents[i] );
      AssertFatal( mIsZero( dt, 0.0001f ), "ConvexShape - non perpendicular input vectors." );
      AssertFatal( planes[i].isUnitLength() && tangents[i].isUnitLength(), "ConvexShape - non unit length input vector." );
   }
#endif

   const U32 planeCount = planes.size();

   Point3F linePt, lineDir;   

   for ( S32 i = 0; i < planeCount; i++ )
   {      
      Vector< MathUtils::Line > collideLines;

      // Find the lines defined by the intersection of this plane with all others.

      for ( S32 j = 0; j < planeCount; j++ )
      {         
         if ( i == j )
            continue;

         if ( planes[i].intersect( planes[j], linePt, lineDir ) )
         {
            collideLines.increment();
            MathUtils::Line &line = collideLines.last();
            line.origin = linePt;
            line.direction = lineDir;   
         }         
      }

      if ( collideLines.empty() )
         continue;

      // Find edges and points defined by the intersection of these lines.
      // As we find them we fill them into our working ConvexShape::Face
      // structure.
      
      Face newFace;

      for ( S32 j = 0; j < collideLines.size(); j++ )
      {
         Vector< Point3F > collidePoints;

         for ( S32 k = 0; k < collideLines.size(); k++ )
         {
            if ( j == k )
               continue;

            MathUtils::LineSegment segment;
            MathUtils::mShortestSegmentBetweenLines( collideLines[j], collideLines[k], &segment );

            F32 dist = ( segment.p0 - segment.p1 ).len();

            if ( dist < 0.0005f )
            {
               S32 l = 0;
               for ( ; l < planeCount; l++ )
               {
                  if ( planes[l].whichSide( segment.p0 ) == PlaneF::Front )
                     break;
               }

               if ( l == planeCount )
                  collidePoints.push_back( segment.p0 );
            }
         }

         //AssertFatal( collidePoints.size() <= 2, "A line can't collide with more than 2 other lines in a convex shape..." );

         if ( collidePoints.size() != 2 )
            continue;

         // Push back collision points into our points vector
         // if they are not duplicates and determine the id
         // index for those points to be used by Edge(s).    

         const Point3F &pnt0 = collidePoints[0];
         const Point3F &pnt1 = collidePoints[1];
         S32 idx0 = -1;
         S32 idx1 = -1;

         for ( S32 k = 0; k < points.size(); k++ )
         {
            if ( pnt0.equal( points[k] ) )
            {
               idx0 = k;
               break;
            }
         }

         for ( S32 k = 0; k < points.size(); k++ )
         {
            if ( pnt1.equal( points[k] ) )
            {
               idx1 = k;
               break;
            }
         }

         if ( idx0 == -1 )
         {
            points.push_back( pnt0 );               
            idx0 = points.size() - 1;
         }

         if ( idx1 == -1 )
         {
            points.push_back( pnt1 );
            idx1 = points.size() - 1;
         }

         // Construct the Face::Edge defined by this collision.

         S32 localIdx0 = newFace.points.push_back_unique( idx0 );
         S32 localIdx1 = newFace.points.push_back_unique( idx1 );

         newFace.edges.increment();
         ConvexShape::Edge &newEdge = newFace.edges.last();
         newEdge.p0 = localIdx0;
         newEdge.p1 = localIdx1;
      }    

      if ( newFace.points.size() < 3 )
         continue;

      //AssertFatal( newFace.points.size() == newFace.edges.size(), "ConvexShape - face point count does not equal edge count." );


		// Fill in some basic Face information.

		newFace.id = i;
		newFace.normal = planes[i];
		newFace.tangent = tangents[i];


		// Make a working array of Point3Fs on this face.

		U32 pntCount = newFace.points.size();		
		Point3F *workPoints = new Point3F[ pntCount ];

		for ( S32 j = 0; j < pntCount; j++ )
			workPoints[j] = points[ newFace.points[j] ];


      // Calculate the average point for calculating winding order.

      Point3F averagePnt = Point3F::Zero;

		for ( S32 j = 0; j < pntCount; j++ )
			averagePnt += workPoints[j];

		averagePnt /= pntCount;		


		// Sort points in correct winding order.

		U32 *vertMap = new U32[pntCount];

      MatrixF quadMat( true );
      quadMat.setPosition( averagePnt );
      quadMat.setColumn( 0, newFace.tangent );
      quadMat.setColumn( 1, mCross( newFace.normal, newFace.tangent ) );
      quadMat.setColumn( 2, newFace.normal );
		quadMat.inverse();

      // Transform working points into quad space 
      // so we can work with them as 2D points.

      for ( S32 j = 0; j < pntCount; j++ )
         quadMat.mulP( workPoints[j] );

		MathUtils::sortQuadWindingOrder( true, workPoints, vertMap, pntCount );

      // Save points in winding order.

      for ( S32 j = 0; j < pntCount; j++ )
         newFace.winding.push_back( vertMap[j] );

      // Calculate the area and centroid of the face.

      newFace.area = 0.0f;
      for ( S32 j = 0; j < pntCount; j++ )
      {
         S32 k = ( j + 1 ) % pntCount;
         const Point3F &p0 = workPoints[ vertMap[j] ];
         const Point3F &p1 = workPoints[ vertMap[k] ];
         
         // Note that this calculation returns positive area for clockwise winding
         // and negative area for counterclockwise winding.
         newFace.area += p0.y * p1.x;
         newFace.area -= p0.x * p1.y;                  
      }
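      // The accumulation above is the 2D shoelace sum with its sign flipped so that
      // clockwise winding yields a positive value; halving it below gives the face area.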

      //AssertFatal( newFace.area > 0.0f, "ConvexShape - face area was not positive." );
      if ( newFace.area > 0.0f )
         newFace.area /= 2.0f;      

      F32 factor;
      F32 cx = 0.0f, cy = 0.0f;
      
      for ( S32 j = 0; j < pntCount; j++ )
      {
         S32 k = ( j + 1 ) % pntCount;
         const Point3F &p0 = workPoints[ vertMap[j] ];
         const Point3F &p1 = workPoints[ vertMap[k] ];

         factor = p0.x * p1.y - p1.x * p0.y;
         cx += ( p0.x + p1.x ) * factor;
         cy += ( p0.y + p1.y ) * factor;
      }
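      // cx and cy accumulate (x_j + x_{j+1}) and (y_j + y_{j+1}) weighted by each edge's
      // cross term, the usual polygon-centroid sums; dividing by 6 * area below yields
      // the centroid of the face in quad space.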
      
      factor = 1.0f / ( newFace.area * 6.0f );
      newFace.centroid.set( cx * factor, cy * factor, 0.0f );
      quadMat.inverse();
      quadMat.mulP( newFace.centroid );

      delete [] workPoints;
      workPoints = NULL;

		// Make polygons / triangles for this face.

		const U32 polyCount = pntCount - 2;

		newFace.triangles.setSize( polyCount );

		for ( S32 j = 0; j < polyCount; j++ )
		{
			ConvexShape::Triangle &poly = newFace.triangles[j];

			poly.p0 = vertMap[0];

			if ( j == 0 )
			{
				poly.p1 = vertMap[ 1 ];
				poly.p2 = vertMap[ 2 ];
			}
			else
			{
				poly.p1 = vertMap[ 1 + j ];
				poly.p2 = vertMap[ 2 + j ];
			}
		}

		delete [] vertMap;


		// Calculate texture coordinates for each point in this face.

		const Point3F binormal = mCross( newFace.normal, newFace.tangent );
		PlaneF planey( newFace.centroid - 0.5f * binormal, binormal );
		PlaneF planex( newFace.centroid - 0.5f * newFace.tangent, newFace.tangent );

		newFace.texcoords.setSize( newFace.points.size() );

		for ( S32 j = 0; j < newFace.points.size(); j++ )
		{
			F32 x = planex.distToPlane( points[ newFace.points[ j ] ] );
			F32 y = planey.distToPlane( points[ newFace.points[ j ] ] );

			newFace.texcoords[j].set( -x, -y );
		}

      // Data verification tests.
#ifdef TORQUE_ENABLE_ASSERTS
      //S32 triCount = newFace.triangles.size();
      //S32 edgeCount = newFace.edges.size();
      //AssertFatal( triCount == edgeCount - 2, "ConvexShape - triangle/edge count do not match." );

      /*
      for ( S32 j = 0; j < triCount; j++ )
      {
         F32 area = MathUtils::mTriangleArea( points[ newFace.points[ newFace.triangles[j][0] ] ], 
                                              points[ newFace.points[ newFace.triangles[j][1] ] ],
                                              points[ newFace.points[ newFace.triangles[j][2] ] ] );
         AssertFatal( area > 0.0f, "ConvexShape - triangle winding bad." );
      }*/
#endif


      // Done with this Face.
      
      faces.push_back( newFace );
   }
}
Example #17
0
void ScenesDock::_file_option(int p_option) {

	switch(p_option) {


		case FILE_SHOW_IN_EXPLORER:
		case FILE_OPEN: {
			int idx=-1;
			for(int i=0;i<files->get_item_count();i++) {
				if (files->is_selected(i)) {
					idx=i;
					break;
				}
			}

			if (idx<0)
				return;



			String path = files->get_item_metadata(idx);
			if (p_option == FILE_SHOW_IN_EXPLORER) {
				String dir = Globals::get_singleton()->globalize_path(path);
				dir = dir.substr(0, dir.find_last("/"));
				OS::get_singleton()->shell_open(String("file://")+dir);
				return;
			}

			if (path.ends_with("/")) {
				if (path!="res://") {
					path=path.substr(0,path.length()-1);
				}
				this->path=path;
				_update_files(false);
				current_path->set_text(path);
				_push_to_history();
			} else {

				if (ResourceLoader::get_resource_type(path)=="PackedScene") {

					editor->open_request(path);
				} else {

					editor->load_resource(path);
				}
			}
		} break;
		case FILE_INSTANCE: {

			for (int i = 0; i<files->get_item_count(); i++) {

				String path =files->get_item_metadata(i);
				if (EditorFileSystem::get_singleton()->get_file_type(path)=="PackedScene") {
					emit_signal("instance",path);
				}
			}
		} break;
		case FILE_DEPENDENCIES: {

			int idx = files->get_current();
			if (idx<0 || idx>=files->get_item_count())
				break;
			String path = files->get_item_metadata(idx);
			deps_editor->edit(path);
		} break;
		case FILE_OWNERS: {

			int idx = files->get_current();
			if (idx<0 || idx>=files->get_item_count())
				break;
			String path = files->get_item_metadata(idx);
			owners_editor->show(path);
		} break;
		case FILE_MOVE: {

			move_dirs.clear();
			move_files.clear();

			for(int i=0;i<files->get_item_count();i++) {

				String path = files->get_item_metadata(i);
				if (!files->is_selected(i))
					continue;

				 if (files->get_item_text(i)=="..") {
					 EditorNode::get_singleton()->show_warning(TTR("Can't operate on '..'"));
					 return;
				 }

				if (path.ends_with("/")) {
					move_dirs.push_back(path.substr(0,path.length()-1));
				} else {
					move_files.push_back(path);
				}
			}


			if (move_dirs.empty() && move_files.size()==1) {

				rename_dialog->clear_filters();
				rename_dialog->add_filter("*."+move_files[0].extension());
				rename_dialog->set_mode(EditorFileDialog::MODE_SAVE_FILE);
				rename_dialog->set_current_path(move_files[0]);
				rename_dialog->popup_centered_ratio();
				rename_dialog->set_title(TTR("Pick New Name and Location For:")+" "+move_files[0].get_file());


			} else {
				//just move
				move_dialog->popup_centered_ratio();
			}


		} break;
		case FILE_REMOVE: {

			Vector<String> torem;

			for(int i=0;i<files->get_item_count();i++) {

				String path = files->get_item_metadata(i);
				if (path.ends_with("/") || !files->is_selected(i))
					continue;
				torem.push_back(path);
			}

			if (torem.empty()) {
				EditorNode::get_singleton()->show_warning(TTR("No files selected!"));
				break;
			}

			remove_dialog->show(torem);
			//1) find if used
			//2) warn

		} break;
		case FILE_INFO: {

		} break;

	}
}
void
AllocationIntegrityState::dump()
{
#ifdef DEBUG
    fprintf(stderr, "Register Allocation Integrity State:\n");

    for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
        LBlock* block = graph.getBlock(blockIndex);
        MBasicBlock* mir = block->mir();

        fprintf(stderr, "\nBlock %lu", static_cast<unsigned long>(blockIndex));
        for (size_t i = 0; i < mir->numSuccessors(); i++)
            fprintf(stderr, " [successor %u]", mir->getSuccessor(i)->id());
        fprintf(stderr, "\n");

        for (size_t i = 0; i < block->numPhis(); i++) {
            const InstructionInfo& info = blocks[blockIndex].phis[i];
            LPhi* phi = block->getPhi(i);
            CodePosition input(block->getPhi(0)->id(), CodePosition::INPUT);
            CodePosition output(block->getPhi(block->numPhis() - 1)->id(), CodePosition::OUTPUT);

            fprintf(stderr, "[%u,%u Phi] [def %s] ",
                    input.bits(),
                    output.bits(),
                    phi->getDef(0)->toString().get());
            for (size_t j = 0; j < phi->numOperands(); j++)
                fprintf(stderr, " [use %s]", info.inputs[j].toString().get());
            fprintf(stderr, "\n");
        }

        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
            LInstruction* ins = *iter;
            const InstructionInfo& info = instructions[ins->id()];

            CodePosition input(ins->id(), CodePosition::INPUT);
            CodePosition output(ins->id(), CodePosition::OUTPUT);

            fprintf(stderr, "[");
            if (input != CodePosition::MIN)
                fprintf(stderr, "%u,%u ", input.bits(), output.bits());
            fprintf(stderr, "%s]", ins->opName());

            if (ins->isMoveGroup()) {
                LMoveGroup* group = ins->toMoveGroup();
                for (int i = group->numMoves() - 1; i >= 0; i--) {
                    fprintf(stderr, " [%s -> %s]",
                            group->getMove(i).from().toString().get(),
                            group->getMove(i).to().toString().get());
                }
                fprintf(stderr, "\n");
                continue;
            }

            for (size_t i = 0; i < ins->numDefs(); i++)
                fprintf(stderr, " [def %s]", ins->getDef(i)->toString().get());

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition* temp = ins->getTemp(i);
                if (!temp->isBogusTemp())
                    fprintf(stderr, " [temp v%u %s]", info.temps[i].virtualRegister(),
                            temp->toString().get());
            }

            size_t index = 0;
            for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
                fprintf(stderr, " [use %s", info.inputs[index++].toString().get());
                if (!alloc->isConstant())
                    fprintf(stderr, " %s", alloc->toString().get());
                fprintf(stderr, "]");
            }

            fprintf(stderr, "\n");
        }
    }

    // Print discovered allocations at the ends of blocks, in the order they
    // were discovered.

    Vector<IntegrityItem, 20, SystemAllocPolicy> seenOrdered;
    if (!seenOrdered.appendN(IntegrityItem(), seen.count())) {
        fprintf(stderr, "OOM while dumping allocations\n");
        return;
    }

    for (IntegrityItemSet::Enum iter(seen); !iter.empty(); iter.popFront()) {
        IntegrityItem item = iter.front();
        seenOrdered[item.index] = item;
    }

    if (!seenOrdered.empty()) {
        fprintf(stderr, "Intermediate Allocations:\n");

        for (size_t i = 0; i < seenOrdered.length(); i++) {
            IntegrityItem item = seenOrdered[i];
            fprintf(stderr, "  block %u reg v%u alloc %s\n",
                    item.block->mir()->id(), item.vreg, item.alloc.toString().get());
        }
    }

    fprintf(stderr, "\n");
#endif
}
Example #19
0
//------------------------------------------------------------------------------
//!
void
DFPolygonRenderable::update()
{
   Vector<float> egver; // vec3: pos, vec3: color
   Vector<float> vgver; // vec3: pos, vec3: color, flt: size
   Vector<uint32_t> eindices;
   Vector<uint32_t> vindices;

   const DFPolygon* poly = _editor->polygon();

   // Vertices.
   for( auto v = poly->begin(); v != poly->end(); ++v )
   {
      vindices.pushBack( uint(vindices.size()) );
      pushBack( vgver, (*v) );
      pushBack( vgver, _col_v_default );
      pushBack( vgver, _siz_v_default );

      pushBack( egver, (*v) );
      pushBack( egver, _col_e_default );
   }
   // Edges.
   const uint n = uint(poly->numVertices());
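   // e.g. for n == 4 this emits the index pairs (3,0), (0,1), (1,2) and (2,3),
   // closing the polygon outline.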
   for( uint i = 0, j = n-1; i < n; j=i++ )
   {
      eindices.pushBack( j );
      eindices.pushBack( i );
   }

   // Tweak for selection.
   auto& selection = _editor->selection();
   for( auto cur = selection.begin(); cur != selection.end(); ++cur )
   {
      float* ptr         = vgver.data() + cur->_idx*7;
      Vec3f::as( ptr+3 ) = (*cur) == selection.last() ? _col_v_selected_p : _col_v_selected_s;
      ptr[6]             = _siz_v_selected;
   }

   // Updating the meshes.
   MeshGeometry* emesh = edgeMesh();
   MeshGeometry* vmesh = vertexMesh();

   emesh->clearPatches();
   emesh->deallocate();
   if( !egver.empty() )
   {
      emesh->allocateIndices( uint(eindices.size()) );
      emesh->copyIndices( eindices.data() );
      emesh->allocateVertices( uint(egver.size())/6 );
      emesh->copyAttributes( egver.data(), 6, 6, 0 );
      emesh->addPatch( 0, uint(eindices.size()) );
   }
   emesh->updateProperties();
   emesh->invalidateRenderableGeometry();


   vmesh->clearPatches();
   vmesh->deallocate();
   if( !vgver.empty() )
   {
      vmesh->allocateIndices( uint(vindices.size()) );
      vmesh->copyIndices( vindices.data() );
      vmesh->allocateVertices( uint(vgver.size())/7 );
      vmesh->copyAttributes( vgver.data(), 7, 7, 0 );
      vmesh->addPatch( 0, uint(vindices.size()) );
   }
   vmesh->updateProperties();
   vmesh->invalidateRenderableGeometry();
}
Example #20
0
	virtual Vector<uint8_t> custom_export(String& p_path,const Ref<EditorExportPlatform> &p_platform) {
		//compile gdscript to bytecode

		if (EditorImportExport::get_singleton()->script_get_action()!=EditorImportExport::SCRIPT_ACTION_NONE) {

			if (p_path.ends_with(".gd")) {
				Vector<uint8_t> file = FileAccess::get_file_as_array(p_path);
				if (file.empty())
					return file;
				String txt;
				txt.parse_utf8((const char*)file.ptr(),file.size());
				file = GDTokenizerBuffer::parse_code_string(txt);

				if (!file.empty()) {

					if (EditorImportExport::get_singleton()->script_get_action()==EditorImportExport::SCRIPT_ACTION_ENCRYPT) {

						String tmp_path=EditorSettings::get_singleton()->get_settings_path().plus_file("tmp/script.gde");
						FileAccess *fa = FileAccess::open(tmp_path,FileAccess::WRITE);
						String skey=EditorImportExport::get_singleton()->script_get_encryption_key().to_lower();
						Vector<uint8_t> key;
						key.resize(32);
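						// Each pair of hex characters in the key string becomes one key byte,
						// e.g. "a3" -> 0xA3; missing characters leave that nibble as 0.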
						for(int i=0;i<32;i++) {
							int v=0;
							if (i*2<skey.length()) {
								CharType ct = skey[i*2];
								if (ct>='0' && ct<='9')
									ct=ct-'0';
								else if (ct>='a' && ct<='f')
									ct=10+ct-'a';
								v|=ct<<4;
							}

							if (i*2+1<skey.length()) {
								CharType ct = skey[i*2+1];
								if (ct>='0' && ct<='9')
									ct=ct-'0';
								else if (ct>='a' && ct<='f')
									ct=10+ct-'a';
								v|=ct;
							}
							key[i]=v;
						}
						FileAccessEncrypted *fae=memnew(FileAccessEncrypted);
						Error err = fae->open_and_parse(fa,key,FileAccessEncrypted::MODE_WRITE_AES256);
						if (err==OK) {

							fae->store_buffer(file.ptr(),file.size());
							p_path=p_path.basename()+".gde";
						}

						memdelete(fae);

						file=FileAccess::get_file_as_array(tmp_path);
						return file;


					} else {

						p_path=p_path.basename()+".gdc";
						return file;
					}
				}

			}
		}

		return Vector<uint8_t>();
	}
Example #21
0
bool
ion::EliminatePhis(MIRGenerator *mir, MIRGraph &graph,
                   Observability observe)
{
    // Eliminates redundant or unobservable phis from the graph.  A
    // redundant phi is something like b = phi(a, a) or b = phi(a, b),
    // both of which can be replaced with a.  An unobservable phi is
    // one whose value is never used in the program.
    //
    // Note that we must be careful not to eliminate phis representing
    // values that the interpreter will require later.  When the graph
    // is first constructed, we can be more aggressive, because there
    // is a greater correspondence between the CFG and the bytecode.
    // After optimizations such as GVN have been performed, however,
    // the bytecode and CFG may not correspond as closely to one
    // another.  In that case, we must be more conservative.  The flag
    // |conservativeObservability| is used to indicate that eliminate
    // phis is being run after some optimizations have been performed,
    // and thus we should use more conservative rules about
    // observability.  The particular danger is that we can optimize
    // away uses of a phi because we think they are not executable,
    // but the foundation for that assumption is false TI information
    // that will eventually be invalidated.  Therefore, if
    // |conservativeObservability| is set, we will consider any use
    // from a resume point to be observable.  Otherwise, we demand a
    // use from an actual instruction.
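    // For example, in a diamond CFG where neither arm redefines |a|, the join block
    // gets b = phi(a, a); every use of b can simply use a instead, so b is redundant.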

    Vector<MPhi *, 16, SystemAllocPolicy> worklist;

    // Add all observable phis to a worklist. We use the "in worklist" bit to
    // mean "this phi is live".
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Phis (populate loop)"))
            return false;

        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            // Flag all as unused; only observable phis will be marked as used
            // when they are processed via the worklist.
            iter->setUnused();

            // If the phi is redundant, remove it here.
            if (MDefinition *redundant = IsPhiRedundant(*iter)) {
                iter->replaceAllUsesWith(redundant);
                iter = block->discardPhiAt(iter);
                continue;
            }

            // Enqueue observable Phis.
            if (IsPhiObservable(*iter, observe)) {
                iter->setInWorklist();
                if (!worklist.append(*iter))
                    return false;
            }
            iter++;
        }
    }

    // Iteratively mark all phis reachable from live phis.
    while (!worklist.empty()) {
        if (mir->shouldCancel("Eliminate Phis (worklist)"))
            return false;

        MPhi *phi = worklist.popCopy();
        JS_ASSERT(phi->isUnused());
        phi->setNotInWorklist();

        // The removal of Phis can produce newly redundant phis.
        if (MDefinition *redundant = IsPhiRedundant(phi)) {
            // Add to the worklist the used phis which are impacted.
            for (MUseDefIterator it(phi); it; it++) {
                if (it.def()->isPhi()) {
                    MPhi *use = it.def()->toPhi();
                    if (!use->isUnused()) {
                        use->setUnusedUnchecked();
                        use->setInWorklist();
                        if (!worklist.append(use))
                            return false;
                    }
                }
            }
            phi->replaceAllUsesWith(redundant);
        } else {
            // Otherwise flag them as used.
            phi->setNotUnused();
        }

        // The current phi is/was used, so all its operands are used.
        for (size_t i = 0; i < phi->numOperands(); i++) {
            MDefinition *in = phi->getOperand(i);
            if (!in->isPhi() || !in->isUnused() || in->isInWorklist())
                continue;
            in->setInWorklist();
            if (!worklist.append(in->toPhi()))
                return false;
        }
    }

    // Sweep dead phis.
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            if (iter->isUnused())
                iter = block->discardPhiAt(iter);
            else
                iter++;
        }
    }

    return true;
}
// Eliminate checks which are redundant given each other or other instructions.
//
// A type barrier is considered redundant if all missing types have been tested
// for by earlier control instructions.
//
// A bounds check is considered redundant if it's dominated by another bounds
// check with the same length and the indexes differ by only a constant amount.
// In this case we eliminate the redundant bounds check and update the other one
// to cover the ranges of both checks.
//
// Bounds checks are added to a hash map and since the hash function ignores
// differences in constant offset, this offers a fast way to find redundant
// checks.
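// For example, if boundscheck(i, length) dominates boundscheck(i + 4, length), the
// dominated check can be dropped and the dominating one widened to cover offsets 0..4.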
bool
ion::EliminateRedundantChecks(MIRGraph &graph)
{
    BoundsCheckMap checks;

    if (!checks.init())
        return false;

    // Stack for pre-order CFG traversal.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;

    // The index of the current block in the CFG traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }

    // Starting from each self-dominating block, traverse the CFG in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();

        // Add all immediate dominators to the front of the worklist.
        for (size_t i = 0; i < block->numImmediatelyDominatedBlocks(); i++) {
            if (!worklist.append(block->getImmediatelyDominatedBlock(i)))
                return false;
        }

        for (MDefinitionIterator iter(block); iter; ) {
            bool eliminated = false;

            if (iter->isBoundsCheck()) {
                if (!TryEliminateBoundsCheck(checks, index, iter->toBoundsCheck(), &eliminated))
                    return false;
            } else if (iter->isTypeBarrier()) {
                if (!TryEliminateTypeBarrier(iter->toTypeBarrier(), &eliminated))
                    return false;
            } else if (iter->isConvertElementsToDoubles()) {
                // Now that code motion passes have finished, replace any
                // ConvertElementsToDoubles with the actual elements.
                MConvertElementsToDoubles *ins = iter->toConvertElementsToDoubles();
                ins->replaceAllUsesWith(ins->elements());
            }

            if (eliminated)
                iter = block->discardDefAt(iter);
            else
                iter++;
        }
        index++;
    }

    JS_ASSERT(index == graph.numBlocks());
    return true;
}
int main(){
    
    std::cout << "Declaring a vector of tests, with objects of type \"int\"...\n";
    Vector<int> vtest;
    
    std::cout << "Testing if the vector is empty even...\n";
    assert( vtest.empty() );
    std::cout << ".. Everything seemed to go well...\n";
    
    std::cout << "Testing capacity [" << vtest.capacity() << "...\n";
    assert( vtest.capacity() == 10 );
    
    std::cout << "Starting tests in push_back method...\n";

    for( auto i = 0; i < 6; i++){
        std::cout << "push_back [" << i <<"]\n";
        vtest.push_back(i);
    }
    
    std::cout << "\nThe vector after insertions: [ ";
    for ( auto var : vtest )
        std::cout << var << " ";
    std::cout << "]\n";
    
    assert( vtest.size() == 6 );
    
    std::cout << "=========================================\n";
    
    /*
    std::cout << "Testing the method data...\n";
    int *ptr = vtest.data();
    for (auto i = 0; i < 10; i++)
        assert(*(ptr+i) == i);
        
    std::cout << "=========================================\n";
    */
    
    std::cout << "Testing methods front and back...\n";
    assert( vtest.front() == 0 );
    assert( vtest.back() == 5 );
    
    std::cout << "=========================================\n";
    
    std::cout << "Testing method pop_back...\n";
    vtest.pop_back();
    
    std::cout << "\nThe vector after removal: [ ";
    for ( auto var : vtest )
        std::cout << var << " ";
    std::cout << "]\n";
    
    assert( vtest.size() == 5 );
    
    std::cout << "=========================================\n";
    
    std::cout << "Testing operator[] and operator=...\n";
    std::cout << "vector[4] = 100 testing...\n";
    vtest[4] = 100;
    
    std::cout << "\nThe vector after insertion: [ ";
    for ( auto var : vtest )
        std::cout << var << " ";
    std::cout << "]\n";
    
    std::cout << "Testing at operator...\n";
    assert( vtest.at(4) == 100 );
    
    std::cout << "=========================================\n";
    
    std::cout << "Testing method assign... \n";
    
    vtest.assign(500);
    
    std::cout << "\nThe vector after asign: [ ";
    for ( auto var : vtest )
        std::cout << var << " ";
    std::cout << "]\n";
    
    assert( vtest.front() == 500 );
    assert( vtest.back() == 500 );
    
    std::cout << "=========================================\n";
    
    std::cout << "Testing clear method...\n";
    
    vtest.clear();
    
    std::cout << "\nThe vector after clear: [ ";
    for ( auto var : vtest )
        std::cout << var << " ";
    std::cout << "]\n";
    
    std::cout << "=========================================\n";
    
    for( auto i = 0; i < 10; i++)
        vtest.push_back(i);
    
    std::cout << "\nJust filling again: [ ";
    for ( auto var : vtest )
        std::cout << var << " ";
    std::cout << "]\n";
    
    std::cout << "=========================================\n";
    
    size_type newCpct = 20;
    vtest.reserve( newCpct );
    
    for( auto i = 10; i < 20; i++)
        vtest.push_back(i);
    
    std::cout << "\nVector after reserve memory: [ ";
    for ( auto var : vtest )
        std::cout << var << " ";
    std::cout << "]\n";
    std::cout << "=========================================\n";
    
    std::cout << "Testing \"copy assign\" and \"copy move\"...\n";
    
    std::cout << "Creating a vector of int [v2]...\n";
    Vector<int> v2 ( vtest );
    std::cout << "Creating a vector of int [v3] and type v3 = v2... \n";
    Vector<int> v3 = v2;
    
    std::cout << "Cheking...\n";
    for (auto i = 0; i < 9; i++) {
        assert(v2[i] == i);
        assert(v3[i] == i);
    }
    
    std::cout << "Creating v4 and moving v2's elements to it... \n";
    Vector<int> v4 ( std::move( v2 ) );
    std::cout << "Creating v5 and moving v3's elements to it... \n";
    Vector<int> v5 = std::move( v3 );
    
    std::cout << "Checking...\n";
    for (auto i = 0; i < 9; i++) {
        assert(v4[i] == i);
        assert(v5[i] == i);
    }
    
    std::cout << "Benchmark v2 and v3 is null...\n";
    
    assert( v2.data() == nullptr );
    assert( v3.data() == nullptr );
    
    std::cout << "It seems that everything worked out here... \n";
    
    std::cout << "=========================================\n";
    
    std::cout << "Testing iterators... \n";
    std::cout << "Creatin a vector [vtest2].\n";
    Vector<int> vtest2;
    
    for( auto i = 0; i < 10; i++)
        vtest2.push_back(i);
    
    std::cout << "\nJust filling: [ ";
    for ( auto var : vtest2 )
        std::cout << var << " ";
    std::cout << "]\n";
    
    auto i = 0;
    for ( auto it = vtest2.begin(); it != vtest2.end(); it++, i++)
        assert(*it == i);

    assert( vtest2.cbegin() != vtest2.cend() );
    
    std::cout << "=========================================\n";
    std::cout << "Everything seemed to run fine, leaving the program ...\n";

    return EXIT_SUCCESS;
}
Example #24
0
Loop::LoopReturn
Loop::init()
{
    IonSpew(IonSpew_LICM, "Loop identified, headed by block %d", header_->id());
    IonSpew(IonSpew_LICM, "footer is block %d", header_->backedge()->id());

    // The first predecessor of the loop header must dominate the header.
    JS_ASSERT(header_->id() > header_->getPredecessor(0)->id());

    // Walk from the backedge to the header and mark all visited blocks
    // as part of the loop. At the same time, add all hoistable instructions
    // (in RPO order) to the instruction worklist.
    Vector<MBasicBlock *, 1, IonAllocPolicy> inlooplist;
    if (!inlooplist.append(header_->backedge()))
        return LoopReturn_Error;
    header_->backedge()->mark();

    while (!inlooplist.empty()) {
        MBasicBlock *block = inlooplist.back();

        // Hoisting requires more finesse if the loop contains a block that
        // self-dominates: there exists control flow that may enter the loop
        // without passing through the loop preheader.
        //
        // Rather than perform a complicated analysis of the dominance graph,
        // just return a soft error to ignore this loop.
        if (block->immediateDominator() == block) {
            while (!worklist_.empty())
                popFromWorklist();
            return LoopReturn_Skip;
        }

        // Add not yet visited predecessors to the inlooplist.
        if (block != header_) {
            for (size_t i = 0; i < block->numPredecessors(); i++) {
                MBasicBlock *pred = block->getPredecessor(i);
                if (pred->isMarked())
                    continue;

                if (!inlooplist.append(pred))
                    return LoopReturn_Error;
                pred->mark();
            }
        }

        // If any block was added, process them first.
        if (block != inlooplist.back())
            continue;

        // Add all instructions in this block (but the control instruction) to the worklist
        for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
            MInstruction *ins = *i;

            // Remember whether this loop contains anything which clobbers most
            // or all floating-point registers. This is just a rough heuristic.
            if (ins->possiblyCalls())
                containsPossibleCall_ = true;

            if (isHoistable(ins)) {
                if (!insertInWorklist(ins))
                    return LoopReturn_Error;
            }
        }

        // All successors of this block are visited.
        inlooplist.popBack();
    }

    return LoopReturn_Success;
}
Example #25
0
bool
ValueNumberer::eliminateRedundancies()
{
    // A definition is 'redundant' iff it is dominated by another definition
    // with the same value number.
    //
    // So, we traverse the dominator tree in pre-order, maintaining a hashmap
    // from value numbers to instructions.
    //
    // For each definition d with value number v, we look up v in the hashmap.
    //
    // If there is a definition d' in the hashmap, and the current traversal
    // index is within that instruction's dominated range, then we eliminate d,
    // replacing all uses of d with uses of d'.
    //
    // If there is no valid definition in the hashtable (the current definition
    // is not in dominated scope), then we insert the current instruction,
    // since it is the most dominant instruction with the given value number.
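    // For example, if two instructions both compute x + y and share a value number,
    // the one dominated by the other is discarded and its uses redirected to the
    // dominating definition.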

    InstructionMap defs;

    if (!defs.init())
        return false;

    IonSpew(IonSpew_GVN, "Eliminating redundant instructions");

    // Stack for pre-order CFG traversal.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;

    // The index of the current block in the CFG traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph_.begin()); i != graph_.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }

    // Starting from each self-dominating block, traverse the CFG in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();

        IonSpew(IonSpew_GVN, "Looking at block %d", block->id());

        // Add all immediate dominators to the front of the worklist.
        for (size_t i = 0; i < block->numImmediatelyDominatedBlocks(); i++) {
            if (!worklist.append(block->getImmediatelyDominatedBlock(i)))
                return false;
        }

        // For each instruction, attempt to look up a dominating definition.
        for (MDefinitionIterator iter(block); iter; ) {
            MDefinition *ins = simplify(*iter, true);

            // Instruction was replaced, and all uses have already been fixed.
            if (ins != *iter) {
                iter = block->discardDefAt(iter);
                continue;
            }

            // Instruction has side-effects and cannot be folded.
            if (!ins->isMovable() || ins->isEffectful()) {
                iter++;
                continue;
            }

            MDefinition *dom = findDominatingDef(defs, ins, index);
            if (!dom)
                return false; // Insertion failed.

            if (dom == ins || !dom->updateForReplacement(ins)) {
                iter++;
                continue;
            }

            IonSpew(IonSpew_GVN, "instruction %d is dominated by instruction %d (from block %d)",
                    ins->id(), dom->id(), dom->block()->id());

            ins->replaceAllUsesWith(dom);

            JS_ASSERT(!ins->hasUses());
            JS_ASSERT(ins->block() == block);
            JS_ASSERT(!ins->isEffectful());
            JS_ASSERT(ins->isMovable());

            iter = ins->block()->discardDefAt(iter);
        }
        index++;
    }

    JS_ASSERT(index == graph_.numBlocks());
    return true;
}
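
The comment block at the top of eliminateRedundancies() summarizes the algorithm: walk the dominator tree in pre-order and keep, per value number, the most dominant definition seen so far. A stripped-down sketch of just that lookup step, using hypothetical Def and findDominatingDef stand-ins rather than the real MDefinition/InstructionMap types, could read:

#include <cstddef>
#include <cstdio>
#include <unordered_map>

// Simplified, hypothetical definition record: a value number plus the pre-order
// index range [domStart, domEnd) of the dominator-tree subtree it dominates.
struct Def {
  int id;
  unsigned valueNumber;
  size_t domStart, domEnd;
  bool dominates(size_t blockIndex) const {
    return blockIndex >= domStart && blockIndex < domEnd;
  }
};

// Returns a dominating definition to reuse, or 'ins' itself if it is now the
// most dominant definition with this value number.
static Def* findDominatingDef(std::unordered_map<unsigned, Def*>& defs,
                              Def* ins, size_t blockIndex) {
  auto it = defs.find(ins->valueNumber);
  if (it != defs.end() && it->second->dominates(blockIndex))
    return it->second;            // redundant: a dominating def already exists
  defs[ins->valueNumber] = ins;   // otherwise 'ins' becomes the representative
  return ins;
}

int main() {
  // Def 1 dominates blocks [0,3); def 2 appears later in block 2 with the same value number.
  Def a{1, 42, 0, 3}, b{2, 42, 2, 3};
  std::unordered_map<unsigned, Def*> defs;
  Def* r1 = findDominatingDef(defs, &a, 0);
  Def* r2 = findDominatingDef(defs, &b, 2);
  std::printf("def %d kept, def %d replaced by def %d\n", r1->id, b.id, r2->id);
}

Here a dominating definition is recognized purely by a pre-order index range; the real pass additionally checks updateForReplacement() and rewrites all uses before discarding the redundant definition.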
bool SmoothConstrainedInterpolator::Make(const Config& qa,const Vector& da,const Config& qb,const Vector& db,
					 GeneralizedCubicBezierSpline& path,
					 bool checkConstraints)
{
  Vector temp(constraint->NumDimensions());
  ConstraintValue(qa,temp);
  if(temp.maxAbsElement() > ftol) {
    fprintf(stderr,"ConstrainedInterpolator: Warning, initial point a is not on manifold, error %g\n",temp.maxAbsElement());
  }
  ConstraintValue(qb,temp);
  if(temp.maxAbsElement() > ftol) {
    fprintf(stderr,"ConstrainedInterpolator: Warning, initial point b is not on manifold, error %g\n",temp.maxAbsElement());
  }

  Vector da2=da,db2=db;
  if(!da.empty()) {
    if(!ProjectVelocity(qa,da2)) {
      fprintf(stderr,"ConstrainedInterpolator: Warning, initial velocity a could not be projected\n");
      da2.setZero();
    }
  }
  if(!db.empty()) {
    if(!ProjectVelocity(qb,db2)) {
      fprintf(stderr,"ConstrainedInterpolator: Warning, initial velocity b could not be projected\n");
      db2.setZero();
    }
  }

  const static double third = 1.0/3.0;
  const static double sixth = 1.0/6.0;
  list<pair<GeneralizedCubicBezierCurve,double> > lpath;
  GeneralizedCubicBezierCurve curve(space);
  curve.x0 = qa;
  curve.x3 = qb;
  curve.SetNaturalTangents(da2,db2);
  bool redo = (da2.empty() || db2.empty());
  if(da2.empty()) {
    curve.Deriv(0,da2);
    if(!ProjectVelocity(qa,da2)) {
      fprintf(stderr,"ConstrainedInterpolator: Warning, start velocity a could not be projected\n");
      da2.setZero();
    }
  }
  if(db2.empty()) {
    curve.Deriv(1,db2);
    if(!ProjectVelocity(qb,db2)) {
      fprintf(stderr,"ConstrainedInterpolator: Warning, end velocity b could not be projected\n");
      db2.setZero();
    }
  }
  if(redo) 
    curve.SetNaturalTangents(da2,db2);

#if CONDITION_INITIAL_TANGENTS
  ConditionTangents(curve);
#endif
  lpath.push_back(pair<GeneralizedCubicBezierCurve,double>(curve,1.0));
  priority_queue<Segment2,vector<Segment2> > q;
  Segment2 s;
  s.prev = lpath.begin();
  s.length = curve.OuterLength();
  q.push(s);

  GeneralizedCubicBezierCurve c1(space),c2(space);
  Config x,v;
  while(!q.empty()) {
    s=q.top(); q.pop();
    if(s.length <= xtol) continue;
    list<pair<GeneralizedCubicBezierCurve,double> >::iterator c = s.prev;
    list<pair<GeneralizedCubicBezierCurve,double> >::iterator n = c; ++n;

    /*
    //optimize end tangents to release tension
    Config* prev=NULL, *next=NULL;
    Real prevdur = 1.0, nextdur = 1.0;
    if(c != lpath.begin()) {
      list<pair<GeneralizedCubicBezierCurve,double> >::iterator p = c; --p;
      prev = &p->first.x0;
      prevdur = p->second;
    }
    if(n != lpath.end()) {
      next = &n->first.x3;
      nextdur = n->second;
    }
    c->first.SetSmoothTangents(prev,next,prevdur/c->second,nextdur/c->second);
    Vector v1,v2;
    c->first.Deriv(0,v1);
    c->first.Deriv(1,v2);
    ProjectVelocity(c->first.x0,v1);
    ProjectVelocity(c->first.x3,v2);
    c->first.SetNaturalTangents(v1,v2);
    */

    //c->first.Eval(0.5,x);
    c->first.Midpoint(x);
    //cout<<"Depth: "<<c->second<<endl;
    //cout<<"Bspline midpoint: "<<x<<", "<<v<<endl;
    //cout<<"Original point :"<<x<<endl;
    if(!Project(x)) {
      ConstraintValue(x,temp);
      if(gConstrainedInterpolateVerbose) cout<<"Projection of point "<<x<<" failed, error "<<temp.maxAbsElement()<<endl;
      return false;
    }
    //cout<<"Projected midpoint: "<<x<<", "<<v<<endl;
    //getchar(); 

#if OPTIMIZE_TANGENTS
    if(space) FatalError("Can't optimize tangents with a manifold");
    //scale the tangents of the curve so that the midpoint gets closer to x
    //xmid = (x0/8+3/8 x1 + 3/8 x2 + x3/8) + 3/8 (alpha (x1-x0) - beta (x3-x2))
    //Solve least squares
    //cout<<"Projected point :"<<x<<endl;
    Vector t1=c->first.x1-c->first.x0,t2=c->first.x3-c->first.x2;
    Vector xmid = (c->first.x0+c->first.x3)*0.5 + 3.0/8.0*(t1-t2);
    Vector rhs = (x - xmid)*8.0*third;
    Real origDist = xmid.distance(x);
    //cout<<"Xmid "<<xmid<<endl;
    Vector2 Atb(dot(rhs,t1),-dot(rhs,t2));
    Matrix2 AtA;
    AtA(0,0) = dot(t1,t1) + 1e-1*s.length;
    AtA(0,1) = AtA(1,0) = -dot(t1,t2);
    AtA(1,1) = dot(t2,t2) + 1e-1*s.length;
    //cout<<"AtA: "<<AtA<<endl;
    bool res = AtA.inplaceInverse();
    if(res) {
      Vector2 alphabeta = AtA*Atb;
      //cout<<"Scaling: "<<alphabeta<<endl;
      if(space) {
	space->Integrate(c->first.x0,(1.0+alphabeta.x)*t1,c->first.x1);
	space->Integrate(c->first.x3,-(1.0+alphabeta.y)*t2,c->first.x2);
      }
      else {
	c->first.x1 = c->first.x0 + (1.0+alphabeta.x)*t1;
	c->first.x2 = c->first.x3 - (1.0+alphabeta.y)*t2;
      }
      //duration1/duration2 are not defined in this function; rescaling the
      //tangents above already adjusts the control polygon, so the original
      //duration bookkeeping is left out here.
      //duration1 *= (1.0+alphabeta.x);
      //duration2 *= (1.0+alphabeta.y);
      c->first.Eval(0.5,rhs);
      Real newDist = rhs.distance(x);
      //Assert(newDist <= origDist+Epsilon);
      //cout<<"Result: "<<rhs<<endl;
      //getchar();
    }
#endif // OPTIMIZE_TANGENTS

    if(checkConstraints && space && !space->IsFeasible(x)) return false;

    //c->first.Deriv(0.5,v);
    c->first.MidpointDeriv(v);
    ProjectVelocity(x,v);

    //subdivide, insert the split segments into the queue
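    //(de Casteljau split at t=1/2, written in terms of the projected midpoint x
    // and the projected midpoint derivative v: the left half gets control points
    // {x0, (x0+x1)/2, x - v/6, x} and the right half {x, x + v/6, (x2+x3)/2, x3};
    // the 1/6 factor comes from each half curve's endpoint derivative 3*(p3-p2)
    // having to equal v/2 after the reparameterization t -> t/2)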
    c1.x0 = c->first.x0;
    if(space) {
      space->Interpolate(c->first.x0,c->first.x1,0.5,c1.x1); 
      space->Integrate(x,v*(-sixth),c1.x2);
    }
    else {
      interpolate(c->first.x0,c->first.x1,0.5,c1.x1); 
      c1.x2 = x-v*sixth;
    }
    c1.x3 = x;
    c2.x0 = x;
    if(space) {
      space->Integrate(x,v*sixth,c2.x1);
    }
    else {
      c2.x1 = x+v*sixth; 
    }
    Interpolate(space,c->first.x2,c->first.x3,0.5,c2.x2);
    c2.x3 = c->first.x3;
    //sanity check
    /*
    Vector temp;
    c1.Deriv(0,temp);
    if(!temp.isEqual((c->first.x1-c->first.x0)*3.0*0.5,1e-4)) {
      cout<<"Invalid starting derivative of subdivision 1"<<endl;
      cout<<temp<<" vs "<<(c->first.x1-c->first.x0)*3.0*0.5<<endl;
      getchar();
    }
    c1.Deriv(1,temp);
    if(!temp.isEqual(v*0.5,1e-4)) {
      cout<<"Invalid ending derivative of subdivision 1"<<endl;
      cout<<temp<<" vs "<<v*0.5<<endl;
      getchar();
    }
    c2.Deriv(0,temp);
    if(!temp.isEqual(v*0.5,1e-4)) {
      cout<<"Invalid starting derivative of subdivision 2"<<endl;
      cout<<temp<<" vs "<<v*0.5<<endl;
      getchar();
    }
    c2.Deriv(1,temp);
    if(!temp.isEqual((c->first.x3-c->first.x2)*3.0*0.5,1e-4)) {
      cout<<"Invalid ending derivative of subdivision 2"<<endl;
      cout<<temp<<" vs "<<(c->first.x3-c->first.x2)*3.0*0.5<<endl;
      getchar();
    }
    */


#if CONDITION_MIDDLE_TANGENTS
    ConditionMiddleTangent(c1,c2);
#endif // CONDITION_MIDDLE_TANGENTS

    Real l1=c1.OuterLength();
    Real l2=c2.OuterLength();
    if(l1 > 0.5*(1.0+maxGrowth)*s.length) {
      if(gConstrainedInterpolateVerbose) {
	cout<<"Projection exceeded growth factor: ";
	cout<<l1<<" > "<<0.5*(1.0+maxGrowth)*s.length<<endl;
      }
      /*
      cout<<c->first.x0<<", "<<c->first.x1<<", "<<c->first.x2<<", "<<c->first.x3<<endl;
      c->first.Midpoint(x);
      c->first.MidpointDeriv(v);
      cout<<"Midpoint: "<<x<<", deriv "<<v<<endl;
      Project(x);
      ProjectVelocity(x,v);
      cout<<"Projected midpoint: "<<x<<", deriv "<<v<<endl;
      */
      //getchar();
      return false;
    }
    if(l2 > 0.5*(1.0+maxGrowth)*s.length) {
      if(gConstrainedInterpolateVerbose) {
	cout<<"Projection exceeded growth factor: ";
	cout<<l2<<" > "<<0.5*(1.0+maxGrowth)*s.length<<endl;
      }
      /*
      cout<<c->first.x0<<", "<<c->first.x1<<", "<<c->first.x2<<", "<<c->first.x3<<endl;      
      c->first.Midpoint(x);
      c->first.MidpointDeriv(v);
      cout<<"Midpoint: "<<x<<", deriv "<<v<<endl;
      Project(x);
      ProjectVelocity(x,v);
      cout<<"Projected midpoint: "<<x<<", deriv "<<v<<endl;
      */
      return false;
    }

    //split the segment: each half gets half of the original duration
    Real origDuration = c->second;
    c->first = c1;
    c->second = 0.5*origDuration;
    list<pair<GeneralizedCubicBezierCurve,double> >::iterator m=lpath.insert(n,pair<GeneralizedCubicBezierCurve,double>(c2,0.5*origDuration));

    s.prev = c;
    s.length = l1;
    if(s.length > xtol) q.push(s);

    s.prev = m;
    s.length = l2;
    if(s.length > xtol) q.push(s);
  }

  //read out the path
  path.segments.resize(lpath.size());
  path.durations.resize(lpath.size());
  size_t k=0;
  for(list<pair<GeneralizedCubicBezierCurve,double> >::iterator i=lpath.begin();i!=lpath.end();i++,k++) {
    path.segments[k] = i->first;
    path.durations[k] = i->second;
  }
  return true;
}
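
As an aside, the disabled OPTIMIZE_TANGENTS block above sets up a small regularized least-squares problem for the tangent scale factors alpha and beta, based on the cubic Bezier midpoint relation xmid' = xmid + 3/8*(alpha*t1 - beta*t2). A standalone sketch of that 2x2 solve, using plain std::vector arithmetic instead of the library's Vector/Matrix2 types (solveTangentScales is a made-up name), is:

#include <cstdio>
#include <vector>

using Vec = std::vector<double>;

static double dot(const Vec& a, const Vec& b) {
  double s = 0;
  for (size_t i = 0; i < a.size(); i++) s += a[i] * b[i];
  return s;
}

// Solve the regularized normal equations
//   [ t1.t1 + r   -t1.t2    ] [alpha]   [  rhs.t1 ]
//   [ -t1.t2      t2.t2 + r ] [beta ] = [ -rhs.t2 ]
// where rhs = (8/3)*(x - xmid); this mirrors the AtA/Atb setup in the disabled
// block (r is the length-based regularization term).
static bool solveTangentScales(const Vec& t1, const Vec& t2, const Vec& rhs,
                               double r, double& alpha, double& beta) {
  double a00 = dot(t1, t1) + r;
  double a01 = -dot(t1, t2);
  double a11 = dot(t2, t2) + r;
  double b0 = dot(rhs, t1);
  double b1 = -dot(rhs, t2);
  double det = a00 * a11 - a01 * a01;
  if (det == 0.0) return false;
  alpha = (a11 * b0 - a01 * b1) / det;   // Cramer's rule for the 2x2 system
  beta  = (-a01 * b0 + a00 * b1) / det;
  return true;
}

int main() {
  Vec t1 = {1.0, 0.0}, t2 = {0.0, 1.0}, rhs = {0.3, -0.2};
  double alpha, beta;
  if (solveTangentScales(t1, t2, rhs, 1e-3, alpha, beta))
    std::printf("alpha = %g, beta = %g\n", alpha, beta);
}

The scaled control points would then be x1' = x0 + (1+alpha)*t1 and x2' = x3 - (1+beta)*t2, matching the Integrate calls in the disabled block.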
Example #27
0
inline bool StructuredGauss::isRowNull(Vector& v)
{
	return v.empty ();
}
Example #28
0
static void assemSparse (const Matrix& eM, SparseMatrix& SM, Vector& SV,
                         const IntVec& meen, const int* meqn,
                         const int* mpmceq, const int* mmceq, const Real* ttcc)
{
  // Add elements corresponding to free dofs in eM into SM
  int i, j, ip, nedof = meen.size();
  for (j = 1; j <= nedof; j++)
  {
    int jeq = meen[j-1];
    if (jeq < 1) continue;

    SM(jeq,jeq) += eM(j,j);

    for (i = 1; i < j; i++)
    {
      int ieq = meen[i-1];
      if (ieq < 1) continue;

      SM(ieq,jeq) += eM(i,j);
      SM(jeq,ieq) += eM(j,i);
    }
  }

  // Add (appropriately weighted) elements corresponding to constrained
  // (dependent and prescribed) dofs in eM into SM and/or SV
  for (j = 1; j <= nedof; j++)
  {
    int jceq = -meen[j-1];
    if (jceq < 1) continue;

    int jp = mpmceq[jceq-1];
    Real c0 = ttcc[jp-1];

    // Add contributions to SV (right-hand-side)
    if (!SV.empty())
      for (i = 1; i <= nedof; i++)
      {
        int ieq = meen[i-1];
        int iceq = -ieq;
        if (ieq > 0)
          SV(ieq) -= c0*eM(i,j);
        else if (iceq > 0)
          for (ip = mpmceq[iceq-1]; ip < mpmceq[iceq]-1; ip++)
            if (mmceq[ip] > 0)
            {
              ieq = meqn[mmceq[ip]-1];
              SV(ieq) -= c0*ttcc[ip]*eM(i,j);
            }
      }

    // Add contributions to SM
    for (jp = mpmceq[jceq-1]; jp < mpmceq[jceq]-1; jp++)
      if (mmceq[jp] > 0)
      {
        int jeq = meqn[mmceq[jp]-1];
        for (i = 1; i <= nedof; i++)
        {
          int ieq = meen[i-1];
          int iceq = -ieq;
          if (ieq > 0)
          {
            SM(ieq,jeq) += ttcc[jp]*eM(i,j);
            SM(jeq,ieq) += ttcc[jp]*eM(j,i);
          }
          else if (iceq > 0)
            for (ip = mpmceq[iceq-1]; ip < mpmceq[iceq]-1; ip++)
              if (mmceq[ip] > 0)
              {
                ieq = meqn[mmceq[ip]-1];
                SM(ieq,jeq) += ttcc[ip]*ttcc[jp]*eM(i,j);
              }
        }
      }
  }
}
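
The unconstrained part of assemSparse() reduces to: map each local DOF to its global equation number through meen, skip non-positive (constrained) entries, and accumulate eM(i,j) into the global matrix. A toy version of that inner pattern, with a map-based sparse matrix instead of the real SparseMatrix class (assembleFreeDofs is a hypothetical name), looks like:

#include <cstdio>
#include <map>
#include <utility>
#include <vector>

// Toy sparse matrix: (row, col) -> value, 1-based equation numbers as above.
using SparseMat = std::map<std::pair<int,int>, double>;

// Assemble the free-DOF part of a dense element matrix eM (nedof x nedof,
// row-major, 0-based here) into SM, using meen[j] as the global equation number
// of local DOF j; non-positive entries are constrained and skipped.
static void assembleFreeDofs(const std::vector<double>& eM, int nedof,
                             const std::vector<int>& meen, SparseMat& SM) {
  for (int j = 0; j < nedof; j++) {
    int jeq = meen[j];
    if (jeq < 1) continue;
    for (int i = 0; i < nedof; i++) {
      int ieq = meen[i];
      if (ieq < 1) continue;
      SM[{ieq, jeq}] += eM[i*nedof + j];
    }
  }
}

int main() {
  // 2x2 element matrix; local DOF 0 maps to equation 1, local DOF 1 is constrained.
  std::vector<double> eM = {4.0, -1.0, -1.0, 4.0};
  std::vector<int> meen = {1, -1};
  SparseMat SM;
  assembleFreeDofs(eM, 2, meen, SM);
  for (auto& kv : SM)
    std::printf("SM(%d,%d) = %g\n", kv.first.first, kv.first.second, kv.second);
}

The real routine additionally exploits symmetry by looping i < j and adding both SM(ieq,jeq) and SM(jeq,ieq), and it spreads contributions from constrained DOFs over their master equations via mpmceq/mmceq/ttcc, which this sketch omits.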
bool
ion::BuildDominatorTree(MIRGraph &graph)
{
    ComputeImmediateDominators(graph);

    // Traversing through the graph in post-order means that every use
    // of a definition is visited before the def itself. Since a def
    // dominates its uses, by the time we reach a particular
    // block, we have processed all of its dominated children, so
    // block->numDominated() is accurate.
    for (PostorderIterator i(graph.poBegin()); i != graph.poEnd(); i++) {
        MBasicBlock *child = *i;
        MBasicBlock *parent = child->immediateDominator();

        // If the block only self-dominates, it has no definite parent.
        if (child == parent)
            continue;

        if (!parent->addImmediatelyDominatedBlock(child))
            return false;

        // An additional +1 for the child block.
        parent->addNumDominated(child->numDominated() + 1);
    }

#ifdef DEBUG
    // If compiling with OSR, many blocks will self-dominate.
    // Without OSR, there is only one root block which dominates all.
    if (!graph.osrBlock())
        JS_ASSERT(graph.begin()->numDominated() == graph.numBlocks() - 1);
#endif
    // Now, iterate through the dominator tree and annotate every
    // block with its index in the pre-order traversal of the
    // dominator tree.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;

    // The index of the current block in the pre-order traversal of the dominator tree.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }
    // Starting from each self-dominating block, traverse the dominator tree in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();
        block->setDomIndex(index);

        for (size_t i = 0; i < block->numImmediatelyDominatedBlocks(); i++) {
            if (!worklist.append(block->getImmediatelyDominatedBlock(i)))
                return false;
        }
        index++;
    }

    return true;
}
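
The second half of BuildDominatorTree() assigns every block its index in a pre-order walk of the dominator tree, seeding an explicit worklist with the self-dominating roots. A compact sketch of just that numbering step, with a hypothetical Node type standing in for MBasicBlock, might be:

#include <cstdio>
#include <vector>

// Hypothetical dominator-tree node; 'kids' are the immediately dominated blocks.
struct Node {
  int id = 0;
  size_t domIndex = 0;
  std::vector<Node*> kids;
};

// Assign pre-order indices over a forest of dominator trees (one tree per root).
static void assignDomIndices(const std::vector<Node*>& roots) {
  std::vector<Node*> worklist(roots);   // order between roots does not matter
  size_t index = 0;
  while (!worklist.empty()) {
    Node* n = worklist.back();
    worklist.pop_back();
    n->domIndex = index++;
    // Children are pushed after numbering n, so each node is numbered before
    // any block it dominates.
    for (Node* kid : n->kids)
      worklist.push_back(kid);
  }
}

int main() {
  Node a, b, c;
  a.id = 1; b.id = 2; c.id = 3;
  a.kids = {&b, &c};                    // a immediately dominates b and c
  assignDomIndices({&a});
  std::printf("%zu %zu %zu\n", a.domIndex, b.domIndex, c.domIndex);  // prints "0 2 1"
}

Because each node is numbered before its children are pushed, every block ends up with an index at least as large as that of its immediate dominator, which is the contiguous-range property the value-numbering pass relies on when it checks dominated scopes.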
Example #30
0
int SIMoutput::writeGlvS1 (const Vector& psol, int iStep, int& nBlock,
                           double time, const char* pvecName,
                           int idBlock, int psolComps, bool scalarOnly)
{
  if (psol.empty())
    return 0;
  else if (!myVtf)
    return -99;

  bool scalarEq = scalarOnly || this->getNoFields() == 1;
  size_t nVcomp = scalarEq ? 1 : this->getNoFields();
  if (nVcomp > this->getNoSpaceDim())
    nVcomp = this->getNoSpaceDim();

  bool haveXsol = false;
  if (mySol)
  {
    if (scalarEq)
      haveXsol = mySol->getScalarSol() != nullptr;
    else
      haveXsol = mySol->getVectorSol() != nullptr;
  }

  size_t nf = scalarEq ? 1 : this->getNoFields();
  size_t pMAX = haveXsol ? nf+nf : nf;
  std::vector<IntVec> sID(pMAX);
  std::array<IntVec,2> vID;
  Matrix field;
  Vector lovec;

  size_t i, j, k;
  int geomID = myGeomID;
  for (i = 0; i < myModel.size(); i++)
  {
    if (myModel[i]->empty()) continue; // skip empty patches

    if (msgLevel > 1)
      IFEM::cout <<"Writing primary solution for patch "<< i+1 << std::endl;

    // Evaluate primary solution variables

    myModel[i]->extractNodeVec(psol,lovec,psolComps,0);
    if (!myModel[i]->evalSolution(field,lovec,opt.nViz))
      return -1;

    myModel[i]->filterResults(field,myVtf->getBlock(++geomID));

    if (!scalarOnly && (nVcomp > 1 || !pvecName))
    {
      // Output as vector field
      if (!myVtf->writeVres(field,++nBlock,geomID,nVcomp))
        return -2;
      else
        vID[0].push_back(nBlock);
    }
    for (j = 1, k = 0; j <= field.rows() && k < pMAX; j++)
      if (!myVtf->writeNres(field.getRow(j),++nBlock,geomID))
        return -3;
      else
        sID[k++].push_back(nBlock);

    if (haveXsol)
    {
      if (msgLevel > 1)
        IFEM::cout <<"Writing exact solution for patch "<< i+1 << std::endl;

      // Evaluate exact primary solution

      const ElementBlock* grid = myVtf->getBlock(geomID);
      Vec3Vec::const_iterator cit = grid->begin_XYZ();
      field.fill(0.0);
      if (scalarEq)
      {
        const RealFunc& pSol = *mySol->getScalarSol();
        for (j = 1; cit != grid->end_XYZ() && haveXsol; j++, ++cit)
          field(1,j) = pSol(Vec4(*cit,time));
      }
      else
      {
        const VecFunc& pSol = *mySol->getVectorSol();
        for (j = 1; cit != grid->end_XYZ() && haveXsol; j++, ++cit)
          field.fillColumn(j,pSol(Vec4(*cit,time)).ptr());
        if (mySol->getScalarSol())
        {
          cit = grid->begin_XYZ();
          const RealFunc& sSol = *mySol->getScalarSol();
          for (j = 1; cit != grid->end_XYZ() && haveXsol; j++, ++cit)
            field(field.rows(),j) = sSol(Vec4(*cit,time));
        }
      }

      for (j = 1; j <= field.rows() && k < pMAX && haveXsol; j++)
        if (!myVtf->writeNres(field.getRow(j),++nBlock,geomID))
          return -3;
        else
          sID[k++].push_back(nBlock);

      if (!myVtf->writeVres(field,++nBlock,geomID,nVcomp))
        return -2;
      else
        vID[1].push_back(nBlock);
    }
  }

  // Write result block identifications

  bool ok = true;
  std::string pname(pvecName ? pvecName : "Solution");
  for (i = 0; i < 2 && ok; i++)
    if (!vID[i].empty())
    {
      std::string vname(i == 1 ? "Exact " + pname : pname);
      if (pvecName)
        ok = myVtf->writeVblk(vID[i],vname.c_str(),idBlock+i,iStep);
      else
        ok = myVtf->writeDblk(vID[i],vname.c_str(),idBlock+i,iStep);
    }

  if (idBlock <= (int)this->getNoSpaceDim())
    idBlock = this->getNoSpaceDim()+1; // Since we might have written BCs above

  std::vector<std::string> xname;
  if (haveXsol) xname.reserve(nf);
  if (nf > 1) pname += "_w";
  for (i = j = 0; i < nf && j < pMAX && !sID[j].empty() && ok; i++)
  {
    if (myProblem && (!pvecName || nf > nVcomp))
      pname = myProblem->getField1Name(i);
    else if (nf > 1)
      (*pname.rbegin()) ++;
    ok = myVtf->writeSblk(sID[j++],pname.c_str(),idBlock++,iStep);
    if (haveXsol) xname.push_back("Exact " + pname);
  }

  for (i = 0; i < xname.size() && j < pMAX && !sID[j].empty() && ok; i++)
    ok = myVtf->writeSblk(sID[j++],xname[i].c_str(),idBlock++,iStep);

  return ok ? idBlock : -4;
}
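
writeGlvS1() also illustrates a two-phase bookkeeping pattern that recurs in these writers: per-patch result blocks are first collected in sID/vID while looping over patches, and only afterwards registered under a field name and block id. A generic sketch of that pattern, with a hypothetical Writer interface in place of the real VTF class, is:

#include <cstdio>
#include <string>
#include <vector>

using IntVec = std::vector<int>;

// Hypothetical writer: each per-patch result gets a block id; a named scalar
// block then groups one id per patch for a given time step.
struct Writer {
  int writeNodeResult(int& nBlock) { return ++nBlock; }
  bool writeScalarBlock(const IntVec& ids, const std::string& name, int idBlock, int step) {
    std::printf("field '%s' (id %d, step %d): %zu patch blocks\n",
                name.c_str(), idBlock, step, ids.size());
    return true;
  }
};

int main() {
  Writer vtf;
  int nBlock = 0;
  const int nPatches = 3, nFields = 2;

  // Phase 1: loop over patches, remembering the block id written for each field.
  std::vector<IntVec> sID(nFields);
  for (int patch = 0; patch < nPatches; patch++)
    for (int f = 0; f < nFields; f++)
      sID[f].push_back(vtf.writeNodeResult(nBlock));

  // Phase 2: register each field's per-patch blocks under one name.
  int idBlock = 10;
  for (int f = 0; f < nFields; f++)
    vtf.writeScalarBlock(sID[f], "field_" + std::to_string(f+1), idBlock++, 1);
}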