Example #1
const OperandREG8 RegisterAllocator::r8(const OperandREF &ref, bool copy)
	{
		OperandREG32 reg = r32(ref, copy);

		// Make sure we only have al, cl, dl or bl
		if(reg.reg >= 4)
		{
			spill(reg);

			// Need to spill one of al, cl, dl or bl
			int candidate = 0;
			unsigned int priority = 0xFFFFFFFF;

			for(int i = 0; i < 4; i++)
			{
				if(GPR[i].priority < priority)
				{
					priority = GPR[i].priority;
					candidate = i;
				}
			}

			spill(OperandREG32(candidate));

			return (OperandREG8)allocate32(candidate, ref, copy, 1);
		}

		return (OperandREG8)reg;
	}
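A note on the reg.reg >= 4 check: it relies on the conventional 32-bit x86 register numbering, sketched below for reference (this enum is illustrative, not part of the original allocator).

// Conventional 32-bit x86 GPR numbering assumed by the check above. Only
// indices 0-3 (EAX, ECX, EDX, EBX) have directly addressable low bytes
// (al, cl, dl, bl); for indices 4-7 the 8-bit encodings name ah, ch, dh, bh
// instead, which is why reg.reg >= 4 forces reallocation into a low register.
enum X86GPR { EAX = 0, ECX = 1, EDX = 2, EBX = 3, ESP = 4, EBP = 5, ESI = 6, EDI = 7 };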
Example #2
DocumentSource::GetNextResult DocumentSourceOut::getNext() {
    pExpCtx->checkForInterrupt();

    if (_done) {
        return GetNextResult::makeEOF();
    }

    if (!_initialized) {
        initialize();
    }

    // Insert all documents into temp collection, batching to perform vectored inserts.
    vector<BSONObj> bufferedObjects;
    int bufferedBytes = 0;

    auto nextInput = pSource->getNext();
    for (; nextInput.isAdvanced(); nextInput = pSource->getNext()) {
        BSONObj toInsert = nextInput.releaseDocument().toBson();

        bufferedBytes += toInsert.objsize();
        if (!bufferedObjects.empty() && (bufferedBytes > BSONObjMaxUserSize ||
                                         bufferedObjects.size() >= write_ops::kMaxWriteBatchSize)) {
            spill(bufferedObjects);
            bufferedObjects.clear();
            bufferedBytes = toInsert.objsize();
        }
        bufferedObjects.push_back(toInsert);
    }
    if (!bufferedObjects.empty())
        spill(bufferedObjects);

    switch (nextInput.getStatus()) {
        case GetNextResult::ReturnStatus::kAdvanced: {
            MONGO_UNREACHABLE;  // We consumed all advances above.
        }
        case GetNextResult::ReturnStatus::kPauseExecution: {
            return nextInput;  // Propagate the pause.
        }
        case GetNextResult::ReturnStatus::kEOF: {

            auto renameCommandObj =
                BSON("renameCollection" << _tempNs.ns() << "to" << _outputNs.ns() << "dropTarget"
                                        << true);

            auto status = pExpCtx->mongoProcessInterface->renameIfOptionsAndIndexesHaveNotChanged(
                pExpCtx->opCtx, renameCommandObj, _outputNs, _originalOutOptions, _originalIndexes);
            uassert(16997, str::stream() << "$out failed: " << status.reason(), status.isOK());

            // We don't need to drop the temp collection in our destructor if the rename succeeded.
            _tempNs = {};
            _done = true;

            // $out doesn't currently produce any outputs.
            return nextInput;
        }
    }
    MONGO_UNREACHABLE;
}
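The loop above illustrates a common batching idiom: buffer documents until adding one more would exceed a byte or count limit, flush ("spill") the batch, then start the next batch with the current document. A minimal, self-contained sketch of the same pattern follows; the limits and the flush callback are generic stand-ins, not the MongoDB constants.

#include <cstddef>
#include <functional>
#include <string>
#include <vector>

// Buffer items until a byte or count limit is reached, then flush the batch.
// Flushing only when the buffer is non-empty guarantees progress even when a
// single item exceeds maxBytes on its own.
void batchAndFlush(const std::vector<std::string>& docs,
                   std::size_t maxBytes, std::size_t maxCount,
                   const std::function<void(const std::vector<std::string>&)>& flush) {
    std::vector<std::string> buffered;
    std::size_t bufferedBytes = 0;
    for (const std::string& doc : docs) {
        bufferedBytes += doc.size();
        if (!buffered.empty() &&
            (bufferedBytes > maxBytes || buffered.size() >= maxCount)) {
            flush(buffered);
            buffered.clear();
            bufferedBytes = doc.size();  // the current doc starts the next batch
        }
        buffered.push_back(doc);
    }
    if (!buffered.empty()) flush(buffered);  // flush the final partial batch
}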
Example #3
int
__signalcontext(ucontext_t *ucp, int sig, __sighandler_t *func)
{
	uint64_t *args, *bsp;
	siginfo_t *sig_si;
	ucontext_t *sig_uc;
	uint64_t sp;

	/* Bail out if we don't have a valid ucontext pointer. */
	if (ucp == NULL)
		abort();

	/*
	 * Build a signal frame and copy the arguments of signal handler
	 * 'func' onto the (memory) stack. We only need 3 arguments, but
	 * we create room for 4 so that we are 16-byte aligned.
	 */
	sp = (ucp->uc_mcontext.mc_special.sp - sizeof(ucontext_t)) & ~15UL;
	sig_uc = (ucontext_t*)sp;
	bcopy(ucp, sig_uc, sizeof(*sig_uc));
	sp = (sp - sizeof(siginfo_t)) & ~15UL;
	sig_si = (siginfo_t*)sp;
	bzero(sig_si, sizeof(*sig_si));
	sig_si->si_signo = sig;
	sp -= 4 * sizeof(uint64_t);
	args = (uint64_t*)sp;
	args[0] = sig;
	args[1] = (intptr_t)sig_si;
	args[2] = (intptr_t)sig_uc;

	/*
	 * Push (spill) the arguments of the context wrapper onto the register
	 * stack. They get loaded by the RSE on a context switch.
	 */
	bsp = (uint64_t*)ucp->uc_mcontext.mc_special.bspstore;
	bsp = spill(bsp, (intptr_t)ucp);
	bsp = spill(bsp, (intptr_t)func);
	bsp = spill(bsp, (intptr_t)args);

	/*
	 * Setup the ucontext of the signal handler.
	 */
	memset(&ucp->uc_mcontext, 0, sizeof(ucp->uc_mcontext));
	ucp->uc_link = sig_uc;
	sigdelset(&ucp->uc_sigmask, sig);
	ucp->uc_mcontext.mc_special.sp = (intptr_t)args - 16;
	ucp->uc_mcontext.mc_special.bspstore = (intptr_t)bsp;
	ucp->uc_mcontext.mc_special.pfs = (3 << 7) | 3;
	ucp->uc_mcontext.mc_special.rsc = 0xf;
	ucp->uc_mcontext.mc_special.rp = ((struct fdesc*)ctx_wrapper)->ip;
	ucp->uc_mcontext.mc_special.gp = ((struct fdesc*)ctx_wrapper)->gp;
	ucp->uc_mcontext.mc_special.fpsr = IA64_FPSR_DEFAULT;
	return (0);
}
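This routine and __makecontext below both rely on a local spill() helper that the excerpt does not show. On ia64 it cannot simply store values back-to-back: the register stack engine reserves every 64th slot for a NaT collection, and the helper has to skip it. A plausible sketch, reconstructed from memory rather than quoted from the FreeBSD source:

/* Store one argument on the register stack backing store, skipping the slot
 * the RSE reserves for NaT collection bits (every 64th 8-byte slot). */
static uint64_t *
spill(uint64_t *bsp, intptr_t arg)
{
	*bsp++ = arg;
	if (((intptr_t)bsp & 0x1ff) == 0x1f8)
		*bsp++ = 0;
	return (bsp);
}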
Example #4
void
__makecontext(ucontext_t *ucp, void (*func)(void), int argc, ...)
{
	uint64_t *args, *bsp;
	va_list ap;
	int i;

	/*
	 * Drop the ball completely if something's not right. We only
	 * support general registers as arguments and not more than 8
	 * of them. Things get hairy if we need to support FP registers
	 * (alignment issues) or more than 8 arguments (stack based).
	 */
	if (argc < 0 || argc > 8 || ucp == NULL ||
	    ucp->uc_stack.ss_sp == NULL || (ucp->uc_stack.ss_size & 15) ||
	    ((intptr_t)ucp->uc_stack.ss_sp & 15) ||
	    ucp->uc_stack.ss_size < MINSIGSTKSZ)
		abort();

	/*
	 * Copy the arguments of function 'func' onto the (memory) stack.
	 * Always take up space for 8 arguments.
	 */
	va_start(ap, argc);
	args = (uint64_t*)(ucp->uc_stack.ss_sp + ucp->uc_stack.ss_size) - 8;
	i = 0;
	while (i < argc)
		args[i++] = va_arg(ap, uint64_t);
	while (i < 8)
		args[i++] = 0;
	va_end(ap);

	/*
	 * Push (spill) the arguments of the context wrapper onto the register
	 * stack. They get loaded by the RSE on a context switch.
	 */
	bsp = (uint64_t*)ucp->uc_stack.ss_sp;
	bsp = spill(bsp, (intptr_t)ucp);
	bsp = spill(bsp, (intptr_t)func);
	bsp = spill(bsp, (intptr_t)args);

	/*
	 * Setup the MD portion of the context.
	 */
	memset(&ucp->uc_mcontext, 0, sizeof(ucp->uc_mcontext));
	ucp->uc_mcontext.mc_special.sp = (intptr_t)args - 16;
	ucp->uc_mcontext.mc_special.bspstore = (intptr_t)bsp;
	ucp->uc_mcontext.mc_special.pfs = (3 << 7) | 3;
	ucp->uc_mcontext.mc_special.rsc = 0xf;
	ucp->uc_mcontext.mc_special.rp = ((struct fdesc*)ctx_wrapper)->ip;
	ucp->uc_mcontext.mc_special.gp = ((struct fdesc*)ctx_wrapper)->gp;
	ucp->uc_mcontext.mc_special.fpsr = IA64_FPSR_DEFAULT;
}
Example #5
    boost::optional<Document> DocumentSourceOut::getNext() {
        pExpCtx->checkForInterrupt();

        // make sure we only write out once
        if (_done)
            return boost::none;
        _done = true;

        verify(_mongod);
        DBClientBase* conn = _mongod->directClient();

        prepTempCollection();
        verify(_tempNs.size() != 0);

        vector<BSONObj> bufferedObjects;
        int bufferedBytes = 0;
        while (boost::optional<Document> next = pSource->getNext()) {
            BSONObj toInsert = next->toBson();
            bufferedBytes += toInsert.objsize();
            if (!bufferedObjects.empty() && bufferedBytes > BSONObjMaxUserSize) {
                spill(conn, bufferedObjects);
                bufferedObjects.clear();
                bufferedBytes = toInsert.objsize();
            }
            bufferedObjects.push_back(toInsert);
        }

        if (!bufferedObjects.empty())
            spill(conn, bufferedObjects);

        // Checking again to make sure we didn't become sharded while running.
        uassert(17018, str::stream() << "namespace '" << _outputNs.ns()
                                     << "' became sharded so it can't be used for $out'",
                !_mongod->isSharded(_outputNs));

        BSONObj rename = BSON("renameCollection" << _tempNs.ns()
                           << "to" << _outputNs.ns()
                           << "dropTarget" << true
                           );
        BSONObj info;
        bool ok = conn->runCommand("admin", rename, info);
        uassert(16997,  str::stream() << "renameCollection for $out failed: " << info,
                ok);

        // We don't need to drop the temp collection in our destructor if the rename succeeded.
        _tempNs = NamespaceString("");

        // This "DocumentSource" doesn't produce output documents. This can change in the future
        // if we support using $out in "tee" mode.
        return boost::none;
    }
Example #6
File: gen.c Project: minux/subc
void commit(void) {
	if (Q_cmp != cnone) {
		commit_cmp();
		return;
	}
	if (Q_bool != bnone) {
		commit_bool();
		return;
	}
	if (empty == Q_type) return;
	spill();
	switch (Q_type) {
	case addr_auto:		cgldla(Q_val); break;
	case addr_static:	cgldsa(Q_val); break;
	case addr_globl:	cgldga(gsym(Q_name)); break;
	case addr_label:	cgldlab(Q_val); break;
	case literal:		cglit(Q_val); break;
	case auto_byte:		cgclear(); cgldlb(Q_val); break;
	case auto_word:		cgldlw(Q_val); break;
	case static_byte:	cgclear(); cgldsb(Q_val); break;
	case static_word:	cgldsw(Q_val); break;
	case globl_byte:	cgclear(); cgldgb(gsym(Q_name)); break;
	case globl_word:	cgldgw(gsym(Q_name)); break;
	default:		fatal("internal: unknown Q_type");
	}
	load();
	Q_type = empty;
}
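commit() is the materialization point of subc's one-slot instruction queue: gen routines only record what should end up in the primary register (Q_type, Q_val, Q_name), and commit() emits the pending load lazily, spilling the current primary value first. A toy model of that queue discipline, with invented names and only two operand kinds:

// Toy model of a delayed-operand queue: record the operand, emit the load
// only when something actually needs it in the primary register.
struct GenSketch {
    enum QType { EMPTY, LITERAL, AUTO_WORD };
    QType qtype = EMPTY;
    int qval = 0;
    bool primaryBusy = false;  // does the primary register hold a live value?

    void queueLiteral(int v) { qtype = LITERAL; qval = v; }
    void queueLocal(int off) { qtype = AUTO_WORD; qval = off; }

    void commit() {
        if (qtype == EMPTY) return;
        if (primaryBusy) emitPushPrimary();  // the "spill"
        switch (qtype) {
        case LITERAL:   emitLoadLiteral(qval); break;
        case AUTO_WORD: emitLoadLocal(qval); break;
        default: break;
        }
        primaryBusy = true;
        qtype = EMPTY;
    }

    // Emitters left as stubs; a real backend would write instructions here.
    void emitPushPrimary()      { /* e.g. cgpush() in the real generator */ }
    void emitLoadLiteral(int v) { /* e.g. cglit(v) */ (void)v; }
    void emitLoadLocal(int off) { /* e.g. cgldlw(off) */ (void)off; }
};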
Example #7
/* Get the number of a free register.  If none are left, spill an unlocked
 * used register into a memory temporary.
 */
regnum getFreeRegnum() 
{
    // return first free non-reserved register (starting with eax)

    int i;
    for(i = FIRSTREG; i <= LASTREG; i++) {
        if(regTab[i].op == NULL) {
            return i;
        }
    }

    /* not enough regs - need to spill one into memtemp */
    // find the first used non-locked register r and spill its opdesc op
    // by calling spill(op, r);

    for(i = FIRSTREG; i <= LASTREG; i++) {
        if(!regTab[i].locked) {
            assert(regTab[i].op != NULL);
            spill(regTab[i].op, i);
            return i;
        }
    }

    assert(FALSE); /* should always find a register to spill... */
    return 0;
}
Example #8
void
RegAlloc::cleanReg(PhysReg reg) {
  RegInfo* r = physRegToInfo(reg);
  if (r->m_state == RegInfo::DIRTY) {
    spill(r);
    stateTransition(r, RegInfo::CLEAN);
  }
}
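The DIRTY/CLEAN states here encode the usual write-back invariant: a dirty register holds a value newer than its home memory slot, so it must be stored before the register can be handed out again. A toy model of that invariant, with invented names rather than HHVM's actual RegInfo:

// Toy write-back invariant: after a spill, register and memory agree.
enum class RegState { FREE, CLEAN, DIRTY };

struct RegInfoSketch {
    RegState state = RegState::FREE;
    int value = 0;            // value currently in the register
    int* homeSlot = nullptr;  // memory location backing the register
};

void cleanRegSketch(RegInfoSketch& r) {
    if (r.state == RegState::DIRTY) {
        *r.homeSlot = r.value;      // the "spill": write back to memory
        r.state = RegState::CLEAN;  // now safe to reuse without losing data
    }
}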
Example #9
void genpushlit(int n)
{
    gentext();
    commit();
    spill();
    cgpushlit(n);
}
Example #10
 void TxnOplog::appendOp(BSONObj o) {
     _seq++;
     _m.push_back(o);
     _mem_size += o.objsize();
     if (_mem_size > _mem_limit) {
         spill();
         _spilled = true;
     }
 }
Example #11
 void TxnOplog::rootCommit(GTID gtid, uint64_t timestamp, uint64_t hash) {
     if (_spilled) {
         // spill in memory ops if any
         spill();
         // log ref
         writeTxnRefToOplog(gtid, timestamp, hash);
     } else {
         writeOpsDirectlyToOplog(gtid, timestamp, hash);
     }
 }
Example #12
static void clobber(Node p)
{
    assert(p);
    switch (generic(p->op))
    {
        case CALL:
            spill(TMP_REG, IREG, p);
            break;
    }
}
Example #13
static Symbol getreg(Symbol s, unsigned mask[], Node p) {
	Symbol r = askreg(s, mask);
	if (r == NULL) {
		r = spillee(s, mask, p);
		assert(r && r->x.regnode);
		spill(r->x.regnode->mask, r->x.regnode->set, p);
		r = askreg(s, mask);
	}
	assert(r && r->x.regnode);
	r->x.regnode->vbl = NULL;
	return r;
}
Example #14
// When all registers are in use, find a good interval to split and spill,
// which could be the current interval.  When an interval is split and the
// second part is spilled, possibly split the second part again before the
// next use-pos that requires a register, and enqueue the third part.
void Vxls::allocBlocked(Interval* current) {
  PhysReg::Map<unsigned> used, blocked;
  RegSet allow;
  unsigned conflict = constrain(current, allow); // repeated from allocate
  allow.forEach([&](PhysReg r) { used[r] = blocked[r] = conflict; });
  auto const cur_start = current->start();
  // compute next use of active registers, so we can pick the furthest one
  for (auto ivl : active) {
    if (ivl->fixed()) {
      blocked[ivl->reg] = used[ivl->reg] = 0;
    } else {
      auto use_pos = ivl->firstUseAfter(cur_start);
      used[ivl->reg] = std::min(use_pos, used[ivl->reg]);
    }
  }
  // compute next intersection/use of inactive regs to find what's free longest
  for (auto ivl : inactive) {
    auto intersect_pos = current->nextIntersect(ivl);
    if (intersect_pos == kMaxPos) continue;
    if (ivl->fixed()) {
      blocked[ivl->reg] = std::min(intersect_pos, blocked[ivl->reg]);
      used[ivl->reg] = std::min(blocked[ivl->reg], used[ivl->reg]);
    } else {
      auto use_pos = ivl->firstUseAfter(cur_start);
      used[ivl->reg] = std::min(use_pos, used[ivl->reg]);
    }
  }
  // choose the best victim register(s) to spill
  auto r = find(used);
  auto used_pos = used[r];
  if (used_pos < current->firstUse()) {
    // all other intervals are used before current's first register-use
    return spill(current);
  }
  auto block_pos = blocked[r];
  if (block_pos < current->end()) {
    auto prev_use = current->lastUseBefore(block_pos);
    auto min_split = std::max(prev_use, cur_start + 1);
    auto max_split = block_pos;
    assert(cur_start < min_split && min_split <= max_split);
    auto split_pos = std::max(min_split, max_split);
    split_pos = nearestSplitBefore(split_pos);
    if (split_pos > current->start()) {
      auto second = current->split(split_pos, true);
      pending.push(second);
    }
  }
  spillOthers(current, r);
  assignReg(current, r);
}
Example #15
// Chaitin's algorithm
void color_graph(graph* g, int n) {
    if (g == NULL) return;
    if (g->vertices == NULL) return;

    int size = g->size;
    int spilled = 0; // number of nodes not considered
    
    // simplify graph
    int simplified; // did we simplify the graph on this iteration?
    vertex* node;
    while (spilled < g->size) {
        // remove all nodes with degree < n
        do {
            simplified = 0;
            for (node = g->vertices; node != NULL; node = node->next) {
                if ((node->removed == 0) && (node->num_edges < n)) {
                    remove_node(node);
                    // push it onto the stack for coloring
                    size--;
                    push(node);
                    simplified = 1;
                }
            }//for
        } while ((size != 0) && (simplified == 1));

        // spill if we can't color this
        if (size == 0) {
            break;
        } else {
            empty_stack();
            spill(g); 
            spilled++;
            size = g->size - spilled;
        }
    }

    // color in reverse order of removal
    int i;
    while (!is_stack_empty()) {
        node = pop();
        reset_node(node);
        // use first available color
        for (i = 0; i < n; i++) {
            if (valid_node_color(node, i)) {
                node->color = i;
                break;
            }
        }//for
    }
}
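color_graph() depends on helpers the excerpt omits (push/pop for the coloring stack, remove_node, reset_node, valid_node_color). The last one only needs to check that no neighbor still present in the graph already holds the candidate color. A plausible sketch, with an assumed vertex layout that likely differs from the original project's:

// Assumed vertex layout for this sketch only.
struct vertex {
    vertex** edges;    // adjacency list
    int num_edges;
    int removed;       // nonzero while simplified out of the graph
    int color;         // -1 until a color is assigned
    vertex* next;
};

// A color is valid if no live (non-removed) neighbor already uses it.
// Nodes still on the coloring stack are marked removed, so they do not
// constrain their neighbors until they are popped and reset.
int valid_node_color(vertex* node, int color) {
    for (int i = 0; i < node->num_edges; i++) {
        vertex* neighbor = node->edges[i];
        if (neighbor->removed == 0 && neighbor->color == color)
            return 0;
    }
    return 1;
}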
Example #16
Assembler::Register RegisterAllocator::spill(Assembler::Register* next_table, Assembler::Register& next) {
  // Use a round-robin strategy to spill the registers.
  const Register current = next;
  do {
    next = next_table[next];
    // Check to see whether or not the register is available for spilling.
    if (!is_referenced(next)) {
      // Spill the register.
      spill(next);
      // Return the register.
      return next;
    }
  } while(next != current);
  // Couldn't allocate a register without spilling.
  return Assembler::no_reg;
}
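The next_table parameter encodes the round-robin order as a linked cycle: next_table[r] names the register to try after r, and the reference to next keeps the rotation position between calls. A toy illustration with four invented registers and a stubbed is_referenced() predicate:

// Toy round-robin victim search over a linked register cycle.
enum Reg { R0, R1, R2, R3, kNumRegs };
static const Reg kNextTable[kNumRegs] = { R1, R2, R3, R0 };  // R0->R1->R2->R3->R0

// Stub: assume nothing is pinned in this sketch; the real predicate tracks
// which registers are currently referenced by the compiler.
inline bool is_referenced(Reg) { return false; }

int findSpillVictim(Reg& next) {
    const Reg current = next;
    do {
        next = kNextTable[next];  // advance the rotation
        if (!is_referenced(next)) {
            return next;          // found an unpinned register to spill
        }
    } while (next != current);
    return -1;                    // every register is pinned
}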
Example #17
Assembler::Register RegisterAllocator::allocate(Assembler::Register reg) {
  // For now we allow registers that aren't handled by the register allocator
  // to be allocated. This might seem a bit strange.
  if (reg < (Assembler::Register)Assembler::number_of_registers) {
    GUARANTEE(!is_referenced(reg), "Cannot allocate referenced register");
    // Setup a reference to the newly allocated register.
    reference(reg);
    // Spill the register.
    spill(reg);

    //cse
    wipe_notation_of(reg);
  }
  // Return the register.
  return reg;
}
Example #18
void Vxls::walkIntervals() {
  for (auto ivl : intervals) {
    if (!ivl) continue;
    if (ivl->fixed()) {
      assignReg(ivl, ivl->vreg);
    } else if (ivl->cns) {
      spill(ivl);
    } else {
      pending.push(ivl);
    }
  }
  while (!pending.empty()) {
    auto current = pending.top();
    pending.pop();
    update(current->start());
    allocate(current);
  }
}
Example #19
LinearScan::RegState* LinearScan::getFreeReg(bool preferCallerSaved) {
  if (m_freeCallerSaved.empty() && m_freeCalleeSaved.empty()) {
    // no free registers --> free the first register in the allocatedRegs
    // list; this register is the one whose last use is the most distant
    ASSERT(!m_allocatedRegs.empty());

    // Pick the first register in <m_allocatedRegs> that is not used
    // for any source operand in the current instruction.
    auto isUnpinned = [&] (RegState* reg) { return !reg->isPinned(); };
    auto pos = std::find_if(m_allocatedRegs.begin(), m_allocatedRegs.end(),
                            isUnpinned);
    if (pos == m_allocatedRegs.end()) {
      PUNT(RegSpill);
    }
    spill((*pos)->m_ssaTmp);
  }

  std::list<RegState*>* preferred = NULL;
  std::list<RegState*>* other = NULL;
  if (preferCallerSaved) {
    preferred = &m_freeCallerSaved;
    other = &m_freeCalleeSaved;
  } else {
    preferred = &m_freeCalleeSaved;
    other = &m_freeCallerSaved;
  }

  RegState* theFreeReg = NULL;
  if (!preferred->empty()) {
    theFreeReg = popFreeReg(*preferred);
  } else {
    theFreeReg = popFreeReg(*other);
  }
  ASSERT(theFreeReg);
  // Pin it so that other operands in the same instruction will not reuse it.
  theFreeReg->m_pinned = true;
  return theFreeReg;
}
Example #20
Assembler::Register
RegisterAllocator::allocate(Assembler::Register* next_table,
    Assembler::Register& next_alloc, Assembler::Register& next_spill) {
  Register reg = allocate_or_fail(next_table, next_alloc);
  if (reg == Assembler::no_reg) {
    reg = Compiler::current()->frame()->try_to_free_length_register();
    if (reg != Assembler::no_reg) {
      return reg;
    }
    // Spill any suitable register.
    reg = spill(next_table, next_spill);
    // Make sure we got ourselves a proper register.
    GUARANTEE(reg != Assembler::no_reg, "Cannot allocate register when all registers are referenced");
    // Setup a reference to the newly allocated register.
    reference(reg);
  }

  //cse
  wipe_notation_of(reg);

  // Return the register.
  return reg;
}
Example #21
void SpillOutputStream::flushBuffer(uint cbRequested)
{
    if (scratchPageLock.isLocked()) {
        assert(!pSegOutputStream);
        // grow from short to long
        spill();
    } else {
        assert(pSegOutputStream);
        assert(!scratchPageLock.isLocked());
        // already long
        assert(cbBuffer >= getBytesAvailable());
        pSegOutputStream->consumeWritePointer(cbBuffer - getBytesAvailable());
    }
    assert(pSegOutputStream);
    if (cbRequested) {
        PBuffer pBuffer =
            pSegOutputStream->getWritePointer(cbRequested,&cbBuffer);
        setBuffer(pBuffer, cbBuffer);
    } else {
        pSegOutputStream->hardPageBreak();
        cbBuffer = 0;
    }
}
Example #22
	void RegisterAllocator::exclude(const OperandREG32 &r32)
	{
		spill(r32);
		prioritize32(r32.reg);
	}
Example #23
// split ivl at pos and spill the second part.  If pos is too close
// to ivl->start(), spill all of ivl.
void Vxls::spillAfter(Interval* ivl, unsigned pos) {
  auto split_pos = nearestSplitBefore(pos);
  auto tail = split_pos <= ivl->start() ? ivl : ivl->split(split_pos);
  spill(tail);
}
Example #24
Assembler::Register RegisterAllocator::allocate_float_register() {
#if ENABLE_ARM_VFP
#if 0
  // This change improves the caching of locals in registers.
  // But the more values that are cached in registers, the more
  // values need to be written to memory at a method call.
  // Find the best fit
  {
    const Register start = Register(_next_float_allocate & ~1);
    Register next = next_vfp_register(start, (Assembler::number_of_float_registers - 8));
    do {
      const bool f0 = !is_referenced(Register(next+0)) && !is_mapping_something(Register(next+0));
      const bool f1 = !is_referenced(Register(next+1)) && !is_mapping_something(Register(next+1));
      if ((f0 + f1) == 1) {
        Register r = next;
        if (f0 == false) {
          r = Register(next + 1);
        }

        reference(r);

        return r;
      }
      next = next_vfp_register(next, 2);
    } while (next != start);
  }
#endif

  // Find a free register
  {
    const Register start = _next_float_allocate;
    Register next = start;

    do {
      if( !is_referenced(next) && !is_mapping_something(next)) {

        reference(next);

        _next_float_allocate = next_vfp_register(next, 1);
        return next;
      }
      next = next_vfp_register(next, 1);
    } while (next != start);
  }

  // Nothing free; spill registers
  {
    const Register start = _next_float_spill;
    Register next = start;
    do {
      if (is_referenced(next)) {
        continue;
      }
#if ENABLE_INLINE
      if (Compiler::current()->conforming_frame()->not_null() &&
          Compiler::current()->conforming_frame()->is_mapping_something(next)) {
        continue;
      }
#endif // ENABLE_INLINE
      spill(next);

      reference(next);
      _next_float_spill = next_vfp_register(next, 1);
      return next;

    } while ((next = next_vfp_register(next, 1)) != start);
  }
  GUARANTEE(false, "Cannot allocate VFP registers for a float");
  return Assembler::no_reg;
#else  // !ENABLE_ARM_VFP
  return allocate(_next_register_table, _next_float_allocate, _next_float_spill);
#endif // ENABLE_ARM_VFP
}
Example #25
Assembler::Register RegisterAllocator::allocate_double_register() {
  {
    const Register start = Register( next_vfp_register( _next_float_allocate, 1) & ~1);
    Register next = start;

    do {
      if (!is_referenced(Register(next + 0)) &&
          !is_referenced(Register(next + 1)) &&
          !is_mapping_something(Register(next + 0)) &&
          !is_mapping_something(Register(next + 1))) {

        reference(Register(next + 0));
        reference(Register(next + 1));

        _next_float_allocate = next_vfp_register(next, 2);
        return next;
      }
      next = next_vfp_register(next, 2);
    } while (next != start);
  }

#if 0
  // This change improves the caching of locals in registers.
  // But the more values that are cached in registers, the more
  // values need to be written to memory at a method call.
  // Try and find a half filled register pair
  {
    const Register start = Register((_next_float_spill) & ~1);
    Register next = next_vfp_register(start, 2);
    Register end = next_vfp_register(start, 8);
    do {
      if (is_referenced(Register(next+0)) || is_referenced(Register(next+1))) {
        continue;
      }
      const bool f0 = is_mapping_something(Register(next+0));
      const bool f1 = is_mapping_something(Register(next+1));

      if ((f0 + f1) == 1) {
        spill(f0 ? next : Register(next + 1));

        reference(Register(next + 0));
        reference(Register(next + 1));
        _next_float_spill = next_vfp_register(next, 2);
        return next;
      }
      next = next_vfp_register(next, 2);
    } while (next != end);
  }
#endif

  // Nothing free; spill registers
  {
    const Register start = Register( next_vfp_register( _next_float_spill, 1) & ~1);
    Register next = start;
    do {
      if (is_referenced(Register(next+0)) || is_referenced(Register(next+1))) {
        continue;
      }
#if ENABLE_INLINE
      if (Compiler::current()->conforming_frame()->not_null() &&
          (Compiler::current()->conforming_frame()->is_mapping_something(Register(next+0)) ||
            Compiler::current()->conforming_frame()->is_mapping_something(Register(next+1)))) {
        continue;
      }
#endif
      spill(Register(next+0));
      spill(Register(next+1));

      reference(Register(next+0));
      reference(Register(next+1));
      _next_float_spill = next_vfp_register(next, 2);
      return next;

    } while ((next = next_vfp_register(next, 2)) != start);
  }
  GUARANTEE(false, "Cannot allocate VFP registers for a double");
  return Assembler::no_reg;
}
Example #26
    void DocumentSourceGroup::populate() {
        const size_t numAccumulators = vpAccumulatorFactory.size();
        dassert(numAccumulators == vpExpression.size());

        // pushed to on spill()
        vector<shared_ptr<Sorter<Value, Value>::Iterator> > sortedFiles;
        int memoryUsageBytes = 0;

        // This loop consumes all input from pSource and buckets it based on pIdExpression.
        while (boost::optional<Document> input = pSource->getNext()) {
            if (memoryUsageBytes > _maxMemoryUsageBytes) {
                uassert(16945, "Exceeded memory limit for $group, but didn't allow external sort."
                               " Pass allowDiskUse:true to opt in.",
                        _extSortAllowed);
                sortedFiles.push_back(spill());
                memoryUsageBytes = 0;
            }

            _variables->setRoot(*input);

            /* get the _id value */
            Value id = computeId(_variables.get());

            /* treat missing values the same as NULL SERVER-4674 */
            if (id.missing())
                id = Value(BSONNULL);

            /*
              Look for the _id value in the map; if it's not there, add a
              new entry with a blank accumulator.
            */
            const size_t oldSize = groups.size();
            vector<intrusive_ptr<Accumulator> >& group = groups[id];
            const bool inserted = groups.size() != oldSize;

            if (inserted) {
                memoryUsageBytes += id.getApproximateSize();

                // Add the accumulators
                group.reserve(numAccumulators);
                for (size_t i = 0; i < numAccumulators; i++) {
                    group.push_back(vpAccumulatorFactory[i]());
                }
            } else {
                for (size_t i = 0; i < numAccumulators; i++) {
                    // subtract old mem usage. New usage added back after processing.
                    memoryUsageBytes -= group[i]->memUsageForSorter();
                }
            }

            /* tickle all the accumulators for the group we found */
            dassert(numAccumulators == group.size());
            for (size_t i = 0; i < numAccumulators; i++) {
                group[i]->process(vpExpression[i]->evaluate(_variables.get()), _doingMerge);
                memoryUsageBytes += group[i]->memUsageForSorter();
            }

            // We are done with the ROOT document so release it.
            _variables->clearRoot();

            DEV {
                // In debug mode, spill every time we have a duplicate id to stress merge logic.
                if (!inserted // is a dup
                        && !pExpCtx->inRouter // can't spill to disk in router
                        && !_extSortAllowed // don't change behavior when testing external sort
                        && sortedFiles.size() < 20 // don't open too many FDs
                        ) {
                    sortedFiles.push_back(spill());
                }
            }
        }

        // These blocks do any final steps necessary to prepare to output results.
        if (!sortedFiles.empty()) {
            _spilled = true;
            if (!groups.empty()) {
                sortedFiles.push_back(spill());
            }

            // We won't be using groups again so free its memory.
            GroupsMap().swap(groups);

            _sorterIterator.reset(
                    Sorter<Value,Value>::Iterator::merge(
                        sortedFiles, SortOptions(), SorterComparator()));

            // prepare current to accumulate data
            _currentAccumulators.reserve(numAccumulators);
            for (size_t i = 0; i < numAccumulators; i++) {
                _currentAccumulators.push_back(vpAccumulatorFactory[i]());
            }

            verify(_sorterIterator->more()); // we put data in, we should get something out.
            _firstPartOfNextGroup = _sorterIterator->next();
        } else {
            // start the group iterator
            groupsIterator = groups.begin();
        }

        populated = true;
    }
Example #27
 FOR_EACH_REG_IN_SET(r, regs) {
   if (r->m_state == RegInfo::DIRTY) {
     spill(r);
     stateTransition(r, RegInfo::CLEAN);
   }
 }
Example #28
/* imc_reg_alloc is the main loop of the allocation algorithm. It operates
 * on a single compilation unit at a time.
 */
void
imc_reg_alloc(struct Parrot_Interp *interpreter, IMC_Unit * unit)
{
    int to_spill;
    int todo, first;

    if (!unit)
        return;
    if (!optimizer_level && pasm_file)
        return;

    init_tables(interpreter);
    allocated = 0;

#if IMC_TRACE
    fprintf(stderr, "reg_alloc.c: imc_reg_alloc\n");
    if (unit->instructions->r[1] && unit->instructions->r[1]->pcc_sub) {
        fprintf(stderr, "img_reg_alloc: pcc_sub (nargs = %d)\n",
            unit->instructions->r[1]->pcc_sub->nargs);
    }
#endif

    debug(interpreter, DEBUG_IMC, "\n------------------------\n");
    debug(interpreter, DEBUG_IMC, "processing sub %s\n", function);
    debug(interpreter, DEBUG_IMC, "------------------------\n\n");
    if (IMCC_INFO(interpreter)->verbose ||
            (IMCC_INFO(interpreter)->debug & DEBUG_IMC))
        imc_stat_init(unit);

    /* consecutive labels, if_branch, unused_labels ... */
    pre_optimize(interpreter, unit);
    if (optimizer_level == OPT_PRE && pasm_file)
        return;

    nodeStack = imcstack_new();
    unit->n_spilled = 0;

    todo = first = 1;
    while (todo) {
        find_basic_blocks(interpreter, unit, first);
        build_cfg(interpreter, unit);

        if (first && (IMCC_INFO(interpreter)->debug & DEBUG_CFG))
            dump_cfg(unit);
        first = 0;
        todo = cfg_optimize(interpreter, unit);
    }

    todo = first = 1;
    while (todo) {
        if (!first) {
            find_basic_blocks(interpreter, unit, 0);
            build_cfg(interpreter, unit);
        }
        first = 0;

        compute_dominators(interpreter, unit);
        find_loops(interpreter, unit);

        build_reglist(interpreter, unit);
        life_analysis(interpreter, unit);
        /* optimize, as long as there is something to do */
        if (dont_optimize)
            todo = 0;
        else {
            todo = optimize(interpreter, unit);
            if (todo)
                pre_optimize(interpreter, unit);
        }
    }
    todo = 1;
#if !DOIT_AGAIN_SAM
    build_interference_graph(interpreter, unit);
#endif
    while (todo) {
#if DOIT_AGAIN_SAM
        build_interference_graph(interpreter, unit);
#endif
        if (optimizer_level & OPT_SUB)
            allocate_wanted_regs(unit);
        compute_spilling_costs(interpreter, unit);
#ifdef DO_SIMPLIFY
        /* simplify until no changes can be made */
        while (simplify(unit)) {}
#endif
        order_spilling(unit);          /* put the remaining items on stack */

        to_spill = try_allocate(interpreter, unit);
        allocated = 1;

        if ( to_spill >= 0 ) {
            allocated = 0;
            spill(interpreter, unit, to_spill);
            /*
             * build the new cfg/reglist on the fly in spill() and
             * do life analysis there for only the involved regs
             */
#if DOIT_AGAIN_SAM
            find_basic_blocks(interpreter, unit, 0);
            build_cfg(interpreter, unit);
            build_reglist(interpreter, unit);
            life_analysis(interpreter);
#endif
        }
        else {
            /* the process is finished */
            todo = 0;
        }
    }
    if (optimizer_level & OPT_SUB)
        sub_optimize(interpreter, unit);
    if (IMCC_INFO(interpreter)->debug & DEBUG_IMC)
        dump_instructions(unit);
    if (IMCC_INFO(interpreter)->verbose  ||
            (IMCC_INFO(interpreter)->debug & DEBUG_IMC))
        print_stat(interpreter, unit);
    imcstack_free(nodeStack);
}
Example #29
void DocumentSourceGroup::initialize() {
    _initialized = true;
    const size_t numAccumulators = vpAccumulatorFactory.size();

    boost::optional<BSONObj> inputSort = findRelevantInputSort();
    if (inputSort) {
        // We can convert to streaming.
        _streaming = true;
        _inputSort = *inputSort;

        // Set up accumulators.
        _currentAccumulators.reserve(numAccumulators);
        for (size_t i = 0; i < numAccumulators; i++) {
            _currentAccumulators.push_back(vpAccumulatorFactory[i]());
            _currentAccumulators.back()->injectExpressionContext(pExpCtx);
        }

        // We only need to load the first document.
        _firstDocOfNextGroup = pSource->getNext();

        if (!_firstDocOfNextGroup) {
            return;
        }

        _variables->setRoot(*_firstDocOfNextGroup);

        // Compute the _id value.
        _currentId = computeId(_variables.get());
        return;
    }

    dassert(numAccumulators == vpExpression.size());

    // pushed to on spill()
    vector<shared_ptr<Sorter<Value, Value>::Iterator>> sortedFiles;
    int memoryUsageBytes = 0;

    // This loop consumes all input from pSource and buckets it based on pIdExpression.
    while (boost::optional<Document> input = pSource->getNext()) {
        if (memoryUsageBytes > _maxMemoryUsageBytes) {
            uassert(16945,
                    "Exceeded memory limit for $group, but didn't allow external sort."
                    " Pass allowDiskUse:true to opt in.",
                    _extSortAllowed);
            sortedFiles.push_back(spill());
            memoryUsageBytes = 0;
        }

        _variables->setRoot(*input);

        /* get the _id value */
        Value id = computeId(_variables.get());

        /*
          Look for the _id value in the map; if it's not there, add a
          new entry with a blank accumulator.
        */
        const size_t oldSize = _groups->size();
        vector<intrusive_ptr<Accumulator>>& group = (*_groups)[id];
        const bool inserted = _groups->size() != oldSize;

        if (inserted) {
            memoryUsageBytes += id.getApproximateSize();

            // Add the accumulators
            group.reserve(numAccumulators);
            for (size_t i = 0; i < numAccumulators; i++) {
                group.push_back(vpAccumulatorFactory[i]());
                group.back()->injectExpressionContext(pExpCtx);
            }
        } else {
            for (size_t i = 0; i < numAccumulators; i++) {
                // subtract old mem usage. New usage added back after processing.
                memoryUsageBytes -= group[i]->memUsageForSorter();
            }
        }

        /* tickle all the accumulators for the group we found */
        dassert(numAccumulators == group.size());
        for (size_t i = 0; i < numAccumulators; i++) {
            group[i]->process(vpExpression[i]->evaluate(_variables.get()), _doingMerge);
            memoryUsageBytes += group[i]->memUsageForSorter();
        }

        // We are done with the ROOT document so release it.
        _variables->clearRoot();

        if (kDebugBuild && !storageGlobalParams.readOnly) {
            // In debug mode, spill every time we have a duplicate id to stress merge logic.
            if (!inserted  // is a dup
                &&
                !pExpCtx->inRouter  // can't spill to disk in router
                &&
                !_extSortAllowed  // don't change behavior when testing external sort
                &&
                sortedFiles.size() < 20  // don't open too many FDs
                ) {
                sortedFiles.push_back(spill());
            }
        }
    }

    // These blocks do any final steps necessary to prepare to output results.
    if (!sortedFiles.empty()) {
        _spilled = true;
        if (!_groups->empty()) {
            sortedFiles.push_back(spill());
        }

        // We won't be using groups again so free its memory.
        _groups = pExpCtx->getValueComparator().makeUnorderedValueMap<Accumulators>();

        _sorterIterator.reset(Sorter<Value, Value>::Iterator::merge(
            sortedFiles, SortOptions(), SorterComparator(pExpCtx->getValueComparator())));

        // prepare current to accumulate data
        _currentAccumulators.reserve(numAccumulators);
        for (size_t i = 0; i < numAccumulators; i++) {
            _currentAccumulators.push_back(vpAccumulatorFactory[i]());
            _currentAccumulators.back()->injectExpressionContext(pExpCtx);
        }

        verify(_sorterIterator->more());  // we put data in, we should get something out.
        _firstPartOfNextGroup = _sorterIterator->next();
    } else {
        // start the group iterator
        groupsIterator = _groups->begin();
    }
}
Example #30
static void target(Node p)
{
    /*
     debug({
     fprintf(stderr, "target called on %x (%s)\n", p, opname(p->op));
     if (p->syms[RX])
     fprintf(stderr, "    sclass: %d, name: %s\n", p->syms[RX]->sclass, p->syms[RX]->name);
     if (p->kids[0]) {
     fprintf(stderr, "    %x (%s)\n", p->kids[0], opname(p->kids[0]->op));
     if (p->kids[0]->syms[RX])
     fprintf(stderr, "        sclass: %d, name: %s\n", p->kids[0]->syms[RX]->sclass, p->kids[0]->syms[RX]->name);
     }
     if (p->kids[1]) {
     fprintf(stderr, "    %x (%s)\n", p->kids[1], opname(p->kids[1]->op));
     if (p->kids[1]->syms[RX])
     fprintf(stderr, "        sclass: %d, name: %s\n", p->kids[1]->syms[RX]->sclass, p->kids[1]->syms[RX]->name);
     }
     });
     */
    assert(p);
    switch (specific(p->op))
    {
        case RET + F:
        case RET + I:
        case RET + U:
        case RET + P:
            rtarget(p, 0, reg[RGA]);
            break;
        case CALL + F:
        case CALL + I:
        case CALL + U:
        case CALL + P:
            setreg(p, reg[RGA]);
            break;
        case ARG + F:
        case ARG + I:
        case ARG + U:
        case ARG + P:
            switch (p->x.argno)
            {
                case 0:
                    debug(
                            fprintf(stderr, "target called on ARG with argno = %d, targetting A\n", p->x.argno));
                    spill(1 << RGA, IREG, p);
                    rtarget(p, 0, reg[RGA]);
                    break;
                case 1:
                    debug(
                            fprintf(stderr, "target called on ARG with argno = %d, targetting B\n", p->x.argno));
                    spill(1 << RGB, IREG, p);
                    rtarget(p, 0, reg[RGB]);
                    break;
                case 2:
                    debug(
                            fprintf(stderr, "target called on ARG with argno = %d, targetting C\n", p->x.argno));
                    spill(1 << RGC, IREG, p);
                    rtarget(p, 0, reg[RGC]);
                    break;
                default:
                    debug(
                            fprintf(stderr, "target called on ARG with argno = %d, skipping\n", p->x.argno));
            }
            break;
    }

    /*
     debug({
     fprintf(stderr, "target returning on %x (%s)\n", p, opname(p->op));
     if (p->syms[RX])
     fprintf(stderr, "    sclass: %d, name: %s\n", p->syms[RX]->sclass, p->syms[RX]->name);
     if (p->kids[0]) {
     fprintf(stderr, "    %x (%s)\n", p->kids[0], opname(p->kids[0]->op));
     if (p->kids[0]->syms[RX])
     fprintf(stderr, "        sclass: %d, name: %s\n", p->kids[0]->syms[RX]->sclass, p->kids[0]->syms[RX]->name);
     }
     if (p->kids[1]) {
     fprintf(stderr, "    %x (%s)\n", p->kids[1], opname(p->kids[1]->op));
     if (p->kids[1]->syms[RX])
     fprintf(stderr, "        sclass: %d, name: %s\n", p->kids[1]->syms[RX]->sclass, p->kids[1]->syms[RX]->name);
     }
     });
     */
}