Example #1
void emitDecRefWorkObj(Vout& v, Vreg obj) {
  auto const shouldRelease = v.makeReg();
  v << cmplim{1, obj[FAST_REFCOUNT_OFFSET], shouldRelease};
  ifThenElse(
    v, CC_E, shouldRelease,
    [&] (Vout& v) {
      // Putting fn inside vcall{} triggers an internal compiler error (gcc 4.4.7)
      auto const fn = CallSpec::method(&ObjectData::release);
      v << vcall{fn, v.makeVcallArgs({{obj}}), v.makeTuple({})};
    },
    [&] (Vout& v) {
      emitDecRef(v, obj);
    }
  );
}
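
For orientation, the vasm ifThenElse helper used above takes a condition code, a status-flags register, and two code-generation lambdas, and arranges for exactly one branch to run. A minimal stand-alone sketch of the same control-flow shape in plain C++ (ifThenElseSketch and its callbacks are illustrative stand-ins, not the HHVM API):

#include <iostream>

// Illustrative stand-in for the vasm helper: evaluate the condition once,
// then run exactly one of the two callbacks.
template <typename Then, typename Else>
void ifThenElseSketch(bool condition, Then&& onThen, Else&& onElse) {
  if (condition) {
    onThen();
  } else {
    onElse();
  }
}

int main() {
  int refcount = 1;
  ifThenElseSketch(
    refcount == 1,
    [&] { std::cout << "refcount hit zero: release the object\n"; },
    [&] { std::cout << "still referenced: just decrement\n"; });
}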
Example #2
void
MaskImage::updateOutputs() {

	int width  = _image->width();
	int height = _image->height();

	_data.reshape(vigra::MultiArray<2, float>::size_type(width, height));

	vigra::combineTwoMultiArrays(
			srcMultiArrayRange(*_image),
			srcMultiArray(*_mask),
			destMultiArray(_data),
			ifThenElse(vigra::functor::Arg2() == vigra::functor::Param(1), vigra::functor::Arg1(), vigra::functor::Param(_maskValue)));

	*_masked = _data;
}
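
The functor expression passed to vigra::combineTwoMultiArrays above selects, per pixel, the image value wherever the mask equals 1 and _maskValue everywhere else. A plain-loop equivalent of that predicate (a sketch with illustrative names, not the vigra API):

#include <cstddef>
#include <vector>

// Scalar form of ifThenElse(Arg2() == Param(1), Arg1(), Param(maskValue)):
// keep the image value where the mask is 1, substitute maskValue elsewhere.
std::vector<float> applyMask(const std::vector<float>& image,
                             const std::vector<int>& mask,
                             float maskValue) {
  std::vector<float> out(image.size());
  for (std::size_t i = 0; i < image.size(); ++i) {
    out[i] = (mask[i] == 1) ? image[i] : maskValue;
  }
  return out;
}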
Example #3
void cgProfileSwitchDest(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<ProfileSwitchDest>();
  auto const idx = srcLoc(env, inst, 0).reg();
  auto& v = vmain(env);

  auto const rcase = v.makeReg();
  auto const sf = v.makeReg();
  v << subq{v.cns(extra->base), idx, rcase, v.makeReg()};
  v << cmpqi{extra->cases - 2, rcase, sf};

  ifThenElse(
    v, CC_AE, sf,
    [&] (Vout& v) {
      // Last vector element is the default case.
      v << inclm{rvmtl()[extra->handle + (extra->cases - 1) * sizeof(int32_t)],
                 v.makeReg()};
    },
    [&] (Vout& v) {
      v << inclm{Vreg{rvmtl()}[rcase * 4 + extra->handle], v.makeReg()};
    }
  );
}
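
The subq/cmpqi pair above implements the classic unsigned range check: after subtracting the base, any index below the base wraps around to a huge unsigned value, so a single above-or-equal test routes both out-of-range directions to the default counter. A scalar sketch of that trick (selectCase, its parameters, and the exact slot count are illustrative):

#include <cstdint>
#include <cstdio>

// One unsigned comparison covers both "idx < base" (wraps to a huge value)
// and "idx too large"; the last counter slot records the default case.
int selectCase(int64_t idx, int64_t base, uint64_t numCases) {
  auto const rcase = static_cast<uint64_t>(idx - base);
  if (rcase >= numCases - 1) return static_cast<int>(numCases - 1);  // default
  return static_cast<int>(rcase);                                    // in range
}

int main() {
  std::printf("%d %d %d\n",
              selectCase(5, 3, 4),   // in range: slot 2
              selectCase(2, 3, 4),   // below base: default slot 3
              selectCase(9, 3, 4));  // above range: default slot 3
}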
Example #4
// MAIN
int main(int argc, char* argv[]) {

    try
    {
        DataParams params(argc, argv);

        params.doSanityChecks();
        int stacksize = params.shape(2);
        Size2D size2 (params.shape(0), params.shape(1)); // isn't there a slicing operator?


        if(params.getVerbose()) {
            std::cout << "Images with Shape: " << params.shape() << std::endl;
            std::cout << "Processing a stack of " << stacksize << " images..." << std::endl;
        }


        // found spots. One Vector over all images in stack
        // the inner set contains all spots in the image
        std::vector<std::set<Coord<float> > > res_coords(stacksize);

        DImage res((size2-Diff2D(1,1))*params.getFactor()+Diff2D(1,1));
        // check if outfile is writable, otherwise throw error -> exit
        exportImage(srcImageRange(res), ImageExportInfo(params.getOutFile().c_str()));
        std::ofstream cf;
        if(!params.getCoordsFile().empty()) {
            cf.open(params.getCoordsFile());
            vigra_precondition(cf.is_open(), "Could not open coordinate-file for writing.");
        }

        USE_NESTED_TICTOC;
        //USETICTOC;
        TICPUSH;  // measure the time

        // STORM algorithm

        CliProgressFunctor func;
        wienerStorm(params, res_coords, func);

        // resulting image
        drawCoordsToImage<Coord<float> >(res_coords, res);

        int numSpots = 0;
        if(cf.is_open()) {
            numSpots = saveCoordsFile(params, cf, res_coords);
            cf.close();
        }

        // end: done.
        std::cout << std::endl << TOCS << std::endl;
        std::cout << "detected " << numSpots << " spots." << std::endl;

        // some maxima are very strong, so we scale the image as appropriate:
        double maxlim = 0., minlim = 0.;
        findMinMaxPercentile(res, 0., minlim, 0.996, maxlim);
        std::cout << "cropping output values to range [" << minlim << ", " << maxlim << "]" << std::endl;
        if(maxlim > minlim) {
            transformImage(srcImageRange(res), destImage(res), ifThenElse(Arg1()>Param(maxlim), Param(maxlim), Arg1()));
        }
        exportImage(srcImageRange(res), ImageExportInfo(params.getOutFile().c_str()));
        if (!params.getSettingsFile().empty())
            params.save();


    }
    catch (vigra::StdException & e)
    {
        std::cout << "There was an error:" << std::endl;
        std::cout << e.what() << std::endl;
        return 1;
    }

    return 0;
}
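
The transformImage call near the end expresses a per-pixel clamp with vigra's functor ifThenElse; a scalar sketch of the same operation (cropToMax is an illustrative name):

#include <algorithm>
#include <vector>

// Scalar form of ifThenElse(Arg1() > Param(maxlim), Param(maxlim), Arg1()):
// values above maxlim are cropped to maxlim, everything else passes through.
void cropToMax(std::vector<double>& pixels, double maxlim) {
  for (auto& p : pixels) p = std::min(p, maxlim);
}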
Example #5
void CodeGenerator::cgGuardRefs(IRInstruction* inst) {
  assert(inst->numSrcs() == 5);

  SSATmp* funcPtrTmp = inst->src(0);
  SSATmp* nParamsTmp = inst->src(1);
  SSATmp* firstBitNumTmp = inst->src(2);
  SSATmp* mask64Tmp  = inst->src(3);
  SSATmp* vals64Tmp  = inst->src(4);

  // Get values in place
  assert(funcPtrTmp->type() == Type::Func);
  auto funcPtrReg = x2a(curOpd(funcPtrTmp).reg());
  assert(funcPtrReg.IsValid());

  assert(nParamsTmp->type() == Type::Int);
  auto nParamsReg = x2a(curOpd(nParamsTmp).reg());
  assert(nParamsReg.IsValid() || nParamsTmp->isConst());

  assert(firstBitNumTmp->isConst() && firstBitNumTmp->type() == Type::Int);
  uint32_t firstBitNum = (uint32_t)(firstBitNumTmp->getValInt());

  assert(mask64Tmp->type() == Type::Int);
  assert(mask64Tmp->isConst());
  auto mask64Reg = x2a(curOpd(mask64Tmp).reg());
  assert(mask64Reg.IsValid() || mask64Tmp->inst()->op() != LdConst);
  uint64_t mask64 = mask64Tmp->getValInt();
  assert(mask64);

  assert(vals64Tmp->type() == Type::Int);
  assert(vals64Tmp->isConst());
  auto vals64Reg = x2a(curOpd(vals64Tmp).reg());
  assert(vals64Reg.IsValid() || vals64Tmp->inst()->op() != LdConst);
  uint64_t vals64 = vals64Tmp->getValInt();
  assert((vals64 & mask64) == vals64);

  auto const destSK = SrcKey(curFunc(), m_unit.bcOff());
  auto const destSR = m_tx64->getSrcRec(destSK);

  auto thenBody = [&] {
    auto bitsOff = sizeof(uint64_t) * (firstBitNum / 64);
    auto cond = CC_NE;
    auto bitsPtrReg = rAsm;

    if (firstBitNum == 0) {
      bitsOff = Func::refBitValOff();
      bitsPtrReg = funcPtrReg;
    } else {
      m_as.    Ldr  (bitsPtrReg, funcPtrReg[Func::sharedOff()]);
      bitsOff -= sizeof(uint64_t);
    }

    // Don't need the bits pointer after this point
    auto bitsReg = rAsm;
    // Load the bits
    m_as.    Ldr  (bitsReg, bitsPtrReg[bitsOff]);

    // Mask the bits. There are restrictions on what can be encoded as an
    // immediate in ARM's logical instructions, and if they're not met, we'll
    // have to use a register.
    if (vixl::Assembler::IsImmLogical(mask64, vixl::kXRegSize)) {
      m_as.  And  (bitsReg, bitsReg, mask64);
    } else {
      if (mask64Reg.IsValid()) {
        m_as.And  (bitsReg, bitsReg, mask64Reg);
      } else {
        m_as.Mov  (rAsm2, mask64);
        m_as.And  (bitsReg, bitsReg, rAsm2);
      }
    }

    // Now do the compare. There are also restrictions on immediates in
    // arithmetic instructions (of which Cmp is one; it's just a subtract that
    // sets flags), so same deal as with the mask immediate above.
    if (vixl::Assembler::IsImmArithmetic(vals64)) {
      m_as.  Cmp  (bitsReg, vals64);
    } else {
      if (vals64Reg.IsValid()) {
        m_as.Cmp  (bitsReg, vals64Reg);
      } else {
        m_as.Mov  (rAsm2, vals64);
        m_as.Cmp  (bitsReg, rAsm2);
      }
    }
    destSR->emitFallbackJump(m_mainCode, cond);
  };

  if (firstBitNum == 0) {
    assert(!nParamsReg.IsValid());
    // This is the first 64 bits. No need to check
    // nParams.
    thenBody();
  } else {
    assert(nParamsReg.IsValid());
    // Check number of args...
    m_as.    Cmp   (nParamsReg, firstBitNum);

    if (vals64 != 0 && vals64 != mask64) {
      // If we're beyond nParams, then either all params
      // are refs, or all params are non-refs, so if vals64
      // isn't 0 and isn't mask64, there's no possibility of
      // a match
      destSR->emitFallbackJump(m_mainCode, CC_LE);
      thenBody();
    } else {
      ifThenElse(m_as, vixl::gt, thenBody, /* else */ [&] {
          //   If not special builtin...
          m_as.  Ldr  (rAsm, funcPtrReg[Func::attrsOff()]);
          m_as.  Tst  (rAsm, AttrVariadicByRef);
          destSR->emitFallbackJump(m_mainCode, vals64 ? CC_Z : CC_NZ);
        });
    }
  }
}
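
Stripped of the register-allocation and immediate-encoding details, the guard emitted by thenBody reduces to a single predicate: mask out the by-ref bits and compare them against the expected pattern, falling back to the interpreter on a mismatch. A distilled sketch (refBitsMatch is an illustrative name):

#include <cstdint>

// Core predicate behind the Ldr/And/Cmp sequence above: the guard holds
// exactly when the masked bits equal the expected values.
inline bool refBitsMatch(uint64_t bits, uint64_t mask64, uint64_t vals64) {
  return (bits & mask64) == vals64;
}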
Example #6
void emitAwait(IRGS& env, int32_t numIters) {
  auto const resumeOffset = nextBcOff(env);
  assertx(curFunc(env)->isAsync());

  if (curFunc(env)->isAsyncGenerator()) PUNT(Await-AsyncGenerator);

  auto const exitSlow   = makeExitSlow(env);

  if (!topC(env)->isA(TObj)) PUNT(Await-NonObject);

  auto const child = popC(env);
  gen(env, JmpZero, exitSlow, gen(env, IsWaitHandle, child));

  // cns() would ODR-use these
  auto const kSucceeded = c_WaitHandle::STATE_SUCCEEDED;
  auto const kFailed    = c_WaitHandle::STATE_FAILED;

  auto const state = gen(env, LdWHState, child);

  /*
   * HHBBC may have proven something about the inner type of this wait handle.
   *
   * So, we may have an assertion on the type of the top of the stack after
   * this instruction.  We know the next bytecode instruction is reachable from
   * fallthrough on the Await, so if it is an AssertRATStk 0, anything coming
   * out of the wait handle must be a subtype of that type, so this is a safe
   * and conservative way to do this optimization (even if our successor
   * bytecode offset is a jump target from things we aren't thinking about
   * here).
   */
  auto const knownTy = [&] {
    auto pc = curUnit(env)->at(resumeOffset);
    if (*reinterpret_cast<const Op*>(pc) != Op::AssertRATStk) return TInitCell;
    ++pc;
    auto const stkLoc = decodeVariableSizeImm(&pc);
    if (stkLoc != 0) return TInitCell;
    auto const rat = decodeRAT(curUnit(env), pc);
    auto const ty = ratToAssertType(env, rat);
    return ty ? *ty : TInitCell;
  }();

  ifThenElse(
    env,
    [&] (Block* taken) {
      auto const succeeded = gen(env, EqInt, state, cns(env, kSucceeded));
      gen(env, JmpNZero, taken, succeeded);
    },
    [&] { // Next: the wait handle is not finished, we need to suspend
      auto const failed = gen(env, EqInt, state, cns(env, kFailed));
      gen(env, JmpNZero, exitSlow, failed);
      if (resumed(env)) {
        implAwaitR(env, child, resumeOffset);
      } else {
        implAwaitE(env, child, resumeOffset, numIters);
      }
    },
    [&] { // Taken: retrieve the result from the wait handle
      auto const res = gen(env, LdWHResult, knownTy, child);
      gen(env, IncRef, res);
      gen(env, DecRef, child);
      push(env, res);
    }
  );
}
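
The three-way branch above distils to a small state machine on the wait handle: a succeeded handle yields its result immediately, a failed one takes the slow exit, and anything else suspends. A sketch with illustrative types (WaitHandle here is a stand-in, not c_WaitHandle):

#include <optional>
#include <stdexcept>

enum class WHState { Succeeded, Failed, Pending };

struct WaitHandle {   // illustrative stand-in for c_WaitHandle
  WHState state;
  int result = 0;
};

// Distilled control flow of emitAwait: the Taken branch pulls the result
// out, a failed handle exits via the slow path, and Pending means "suspend".
std::optional<int> awaitSketch(const WaitHandle& wh) {
  if (wh.state == WHState::Succeeded) return wh.result;   // Taken branch
  if (wh.state == WHState::Failed) {
    throw std::runtime_error("exitSlow");                 // slow exit
  }
  return std::nullopt;   // Next branch: suspend and resume at resumeOffset
}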
Example #7
void CodeGenerator::cgGuardRefs(IRInstruction* inst) {
  assert(inst->numSrcs() == 5);

  SSATmp* funcPtrTmp = inst->src(0);
  SSATmp* nParamsTmp = inst->src(1);
  SSATmp* firstBitNumTmp = inst->src(2);
  SSATmp* mask64Tmp  = inst->src(3);
  SSATmp* vals64Tmp  = inst->src(4);

  // Get values in place
  assert(funcPtrTmp->type() == Type::Func);
  auto funcPtrReg = x2a(m_regs[funcPtrTmp].reg());
  assert(funcPtrReg.IsValid());

  assert(nParamsTmp->type() == Type::Int);
  auto nParamsReg = x2a(m_regs[nParamsTmp].reg());
  assert(nParamsReg.IsValid() || nParamsTmp->isConst());

  assert(firstBitNumTmp->isConst() && firstBitNumTmp->type() == Type::Int);
  uint32_t firstBitNum = (uint32_t)(firstBitNumTmp->getValInt());

  assert(mask64Tmp->type() == Type::Int);
  assert(mask64Tmp->isConst());
  auto mask64Reg = x2a(m_regs[mask64Tmp].reg());
  assert(mask64Reg.IsValid() || mask64Tmp->inst()->op() != LdConst);
  uint64_t mask64 = mask64Tmp->getValInt();
  assert(mask64);

  assert(vals64Tmp->type() == Type::Int);
  assert(vals64Tmp->isConst());
  auto vals64Reg = x2a(m_regs[vals64Tmp].reg());
  assert(vals64Reg.IsValid() || vals64Tmp->inst()->op() != LdConst);
  uint64_t vals64 = vals64Tmp->getValInt();
  assert((vals64 & mask64) == vals64);

  auto const destSK = SrcKey(curFunc(), m_unit.bcOff());
  auto const destSR = m_tx64->getSrcRec(destSK);

  auto thenBody = [&] {
    auto bitsOff = sizeof(uint64_t) * (firstBitNum / 64);
    auto cond = CC_NE;
    auto bitsPtrReg = rAsm;

    if (firstBitNum == 0) {
      bitsOff = Func::refBitValOff();
      bitsPtrReg = funcPtrReg;
    } else {
      m_as.    Ldr  (bitsPtrReg, funcPtrReg[Func::sharedOff()]);
      bitsOff -= sizeof(uint64_t);
    }

    if (vals64 == 0 || (mask64 & (mask64 - 1)) == 0) {
      // If vals64 is zero, or we're testing a single
      // bit, we can get away with a single test,
      // rather than mask-and-compare
      m_as.    Ldr  (rAsm2, bitsPtrReg[bitsOff]);
      if (mask64Reg.IsValid()) {
        m_as.  Tst  (rAsm2, mask64Reg);
      } else {
        assert(vixl::Assembler::IsImmLogical(mask64, vixl::kXRegSize));
        m_as.  Tst  (rAsm2, mask64);
      }
      if (vals64) cond = CC_E;
    } else {
      auto bitsValReg = rAsm;
      m_as.    Ldr  (bitsValReg, bitsPtrReg[bitsOff]);
      if (debug) bitsPtrReg = Register();

      //     bitsValReg <- bitsValReg & mask64
      // NB: these 'And' ops don't set flags. They don't need to.
      if (mask64Reg.IsValid()) {
        m_as.  And    (bitsValReg, bitsValReg, mask64Reg);
      } else {
        // There are restrictions on the immediates that can be encoded into
        // logical ops. If the mask doesn't meet those restrictions, we have to
        // load it into a register first.
        if (vixl::Assembler::IsImmLogical(mask64, vixl::kXRegSize)) {
          m_as.And    (bitsValReg, bitsValReg, mask64);
        } else {
          m_as.Mov    (rAsm2, mask64);
          m_as.And    (bitsValReg, bitsValReg, rAsm2);
        }
      }

      //   If bitsValReg != vals64, then goto Exit
      if (vals64Reg.IsValid()) {
        m_as.  Cmp    (bitsValReg, vals64Reg);
      } else {
        m_as.  Cmp    (bitsValReg, vals64);
      }
    }
    destSR->emitFallbackJump(m_mainCode, cond);
  };

  if (firstBitNum == 0) {
    assert(!nParamsReg.IsValid());
    // This is the first 64 bits. No need to check
    // nParams.
    thenBody();
  } else {
    assert(nParamsReg.IsValid());
    // Check number of args...
    m_as.    Cmp   (nParamsReg, firstBitNum);

    if (vals64 != 0 && vals64 != mask64) {
      // If we're beyond nParams, then either all params
      // are refs, or all params are non-refs, so if vals64
      // isn't 0 and isn't mask64, there's no possibility of
      // a match
      destSR->emitFallbackJump(m_mainCode, CC_LE);
      thenBody();
    } else {
      ifThenElse(m_as, vixl::gt, thenBody, /* else */ [&] {
          //   If not special builtin...
          m_as.  Ldr  (rAsm, funcPtrReg[Func::attrsOff()]);
          m_as.  Tst  (rAsm, AttrVariadicByRef);
          destSR->emitFallbackJump(m_mainCode, vals64 ? CC_Z : CC_NZ);
        });
    }
  }
}
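
Example #7 differs from example #5 chiefly in the fast path of thenBody: when vals64 is zero, or mask64 has a single bit set, the full mask-and-compare collapses into a single test instruction. The scalar form of that case analysis (guardHolds is an illustrative name):

#include <cstdint>

// When vals64 == 0, or mask64 is a power of two ((mask64 & (mask64 - 1)) == 0),
// (bits & mask64) == vals64 reduces to a zero/non-zero test of bits & mask64.
bool guardHolds(uint64_t bits, uint64_t mask64, uint64_t vals64) {
  if (vals64 == 0) return (bits & mask64) == 0;          // Tst, expect zero
  if ((mask64 & (mask64 - 1)) == 0) {
    return (bits & mask64) != 0;                         // Tst, expect non-zero
  }
  return (bits & mask64) == vals64;                      // general And + Cmp
}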