Example #1
/*
 * Allocate an unassigned memnode.
 */
int
mem_node_alloc()
{
	int mnode;
	mnodeset_t newmask, oldmask;

	/*
	 * Find an unused memnode.  Update it atomically to prevent
	 * a first time memnode creation race.
	 */
	for (mnode = 0; mnode < max_mem_nodes; mnode++)
		if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists,
		    0, 1) == 0)
			break;

	if (mnode >= max_mem_nodes)
		panic("Out of free memnodes\n");

	mem_node_config[mnode].physbase = (uint64_t)-1;
	mem_node_config[mnode].physmax = 0;
	atomic_inc_16(&num_memnodes);
	do {
		oldmask = memnodes_mask;
		newmask = memnodes_mask | (1ull << mnode);
	} while (atomic_cas_64(&memnodes_mask, oldmask, newmask) != oldmask);

	return (mnode);
}
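
The two CAS idioms above recur throughout these examples: claiming a free slot by swapping its exists flag from 0 to 1, and publishing a bit in a shared 64-bit mask with a compare-and-swap retry loop. The following is a minimal user-space analogue using C11 <stdatomic.h> rather than the kernel's atomic_cas_32()/atomic_cas_64(); the names (slot_claim, slot_exists, slot_mask, MAX_SLOTS) are invented for the sketch and do not appear in the original source.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define	MAX_SLOTS	64

static _Atomic uint32_t slot_exists[MAX_SLOTS];
static _Atomic uint64_t slot_mask;

/*
 * Claim an unused slot and publish it in slot_mask.
 * Returns the slot index, or -1 if every slot is taken.
 */
static int
slot_claim(void)
{
	int i;
	uint64_t oldmask;

	for (i = 0; i < MAX_SLOTS; i++) {
		uint32_t expected = 0;

		/* Winning this CAS resolves the first-time creation race. */
		if (atomic_compare_exchange_strong(&slot_exists[i],
		    &expected, 1))
			break;
	}
	if (i >= MAX_SLOTS)
		return (-1);

	/* Publish the bit; retry if another thread changed the mask. */
	do {
		oldmask = atomic_load(&slot_mask);
	} while (!atomic_compare_exchange_weak(&slot_mask, &oldmask,
	    oldmask | (1ull << i)));

	return (i);
}

int
main(void)
{
	printf("claimed slot %d\n", slot_claim());
	return (0);
}

The mask-publishing loop is functionally an atomic OR (C11 atomic_fetch_or, or atomic_or_64() from <sys/atomic.h>), so the explicit loop in mem_node_alloc() is simply the CAS spelling of that operation. Note also that mem_node_alloc() parks physbase at (uint64_t)-1 and physmax at 0, which leaves the new node's range empty until mem_node_add_slice() installs real bounds.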
Example #2
/*
 * An interrupt thread is returning from swtch(). Place a starting timestamp
 * in its thread structure.
 */
void
cpu_intr_swtch_exit(kthread_id_t t)
{
	uint64_t ts;

	ASSERT((t->t_flag & T_INTR_THREAD) != 0);
	ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

	do {
		ts = t->t_intr_start;
	} while (atomic_cas_64(&t->t_intr_start, ts, CLOCK_TICK_COUNTER()) !=
	    ts);
}
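
The read-then-CAS loop above installs a fresh timestamp unconditionally, which makes it behave like an atomic exchange. A minimal sketch of the same effect with C11 atomics, using a hypothetical read_tick_counter() as a stand-in for CLOCK_TICK_COUNTER():

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical stand-in for CLOCK_TICK_COUNTER(), for illustration only. */
static uint64_t
read_tick_counter(void)
{
	static _Atomic uint64_t fake_ticks;

	return (atomic_fetch_add(&fake_ticks, 1));
}

static _Atomic uint64_t intr_start;

/* Same net effect as the loop above: atomically replace the timestamp. */
static void
intr_swtch_exit_model(void)
{
	(void) atomic_exchange(&intr_start, read_tick_counter());
}

int
main(void)
{
	intr_swtch_exit_model();
	return (0);
}

Example #7 below uses the same loop shape in the case where the old value does matter: the timestamp being consumed is what the interval is computed from, and that is exactly the value an exchange returns.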
Example #3
/*
 * Remove a PFN range from a memnode.  On some platforms,
 * the memnode will be created with physbase at the first
 * allocatable PFN, but later deleted with the MC slice
 * base address converted to a PFN, in which case we need
 * to assume physbase and up.
 */
void
mem_node_del_slice(pfn_t start, pfn_t end)
{
	int mnode;
	pgcnt_t delta_pgcnt, node_size;
	mnodeset_t omask, nmask;

	if (mem_node_physalign) {
		start &= ~(btop(mem_node_physalign) - 1);
		end = roundup(end, btop(mem_node_physalign)) - 1;
	}
	mnode = PFN_2_MEM_NODE(start);

	ASSERT(mnode < max_mem_nodes);
	ASSERT(mem_node_config[mnode].exists == 1);

	delta_pgcnt = end - start;
	node_size = mem_node_config[mnode].physmax -
	    mem_node_config[mnode].physbase;

	if (node_size > delta_pgcnt) {
		/*
		 * Subtract the slice from the memnode.
		 */
		if (start <= mem_node_config[mnode].physbase)
			mem_node_config[mnode].physbase = end + 1;
		ASSERT(end <= mem_node_config[mnode].physmax);
		if (end == mem_node_config[mnode].physmax)
			mem_node_config[mnode].physmax = start - 1;
	} else {

		/*
		 * Let the common lgrp framework know the mnode is
		 * leaving
		 */
		lgrp_config(LGRP_CONFIG_MEM_DEL, mnode,
		    MEM_NODE_2_LGRPHAND(mnode));

		/*
		 * Delete the whole node.
		 */
		ASSERT(MNODE_PGCNT(mnode) == 0);
		do {
			omask = memnodes_mask;
			nmask = omask & ~(1ull << mnode);
		} while (atomic_cas_64(&memnodes_mask, omask, nmask) != omask);
		atomic_dec_16(&num_memnodes);
		mem_node_config[mnode].exists = 0;
	}
}
Example #4
/*
 * Adjust the memnode config after a DR operation.
 *
 * It is rather tricky to do these updates since we can't
 * protect the memnode structures with locks, so we must
 * be mindful of the order in which updates and reads to
 * these values can occur.
 */
void
mem_node_add_slice(pfn_t start, pfn_t end)
{
	int mnode;
	mnodeset_t newmask, oldmask;

	/*
	 * DR will pass us the first pfn that is allocatable.
	 * We need to round down to get the real start of
	 * the slice.
	 */
	if (mem_node_physalign) {
		start &= ~(btop(mem_node_physalign) - 1);
		end = roundup(end, btop(mem_node_physalign)) - 1;
	}

	mnode = PFN_2_MEM_NODE(start);
	ASSERT(mnode < max_mem_nodes);

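	/*
	 * atomic_cas_32() returns the previous value of .exists, so a
	 * nonzero result means this memnode was already configured.
	 */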
	if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists, 0, 1)) {
		/*
		 * Add slice to existing node.
		 */
		if (start < mem_node_config[mnode].physbase)
			mem_node_config[mnode].physbase = start;
		if (end > mem_node_config[mnode].physmax)
			mem_node_config[mnode].physmax = end;
	} else {
		mem_node_config[mnode].physbase = start;
		mem_node_config[mnode].physmax = end;
		atomic_inc_16(&num_memnodes);
		do {
			oldmask = memnodes_mask;
			newmask = memnodes_mask | (1ull << mnode);
		} while (atomic_cas_64(&memnodes_mask, oldmask, newmask) !=
			 oldmask);
	}
	/*
	 * Let the common lgrp framework know about the new memory
	 */
	lgrp_config(LGRP_CONFIG_MEM_ADD, mnode, MEM_NODE_2_LGRPHAND(mnode));
}
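
Because nothing locks mem_node_config[], concurrent readers must tolerate whatever intermediate states these update orders allow. The following is a hypothetical reader, not taken from the original source; the stand-in declarations at the top exist only to make the sketch self-contained and mirror the fields the examples above use.

#include <stdint.h>

/* Stand-in declarations for the sketch; the real ones live in the kernel. */
typedef uint64_t pfn_t;
struct mem_node_conf {
	uint32_t exists;
	uint64_t physbase;
	uint64_t physmax;
};
extern struct mem_node_conf mem_node_config[];
extern int max_mem_nodes;

/*
 * Lock-free scan over the memnode config.  A node between
 * mem_node_alloc() and its first mem_node_add_slice() has
 * physbase = (uint64_t)-1 and physmax = 0, i.e. an empty range,
 * so the comparison below fails harmlessly for it.
 */
static int
pfn_in_some_memnode(pfn_t pfn)
{
	int mnode;

	for (mnode = 0; mnode < max_mem_nodes; mnode++) {
		if (!mem_node_config[mnode].exists)
			continue;
		if (pfn >= mem_node_config[mnode].physbase &&
		    pfn <= mem_node_config[mnode].physmax)
			return (1);
	}
	return (0);
}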
Example #5
void block_bits::release_contiguous(size_t index, size_t chip_count) {
    // assign this chip to the zombie set for later recycling
    (void) chip_count; // keep gcc happy
    assert(index < chip_count);
    bitmap to_free = bitmap(1) << index;
    assert(! (to_free & *usable_chips()));
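    // membar_exit() is Solaris's release barrier here: memory accesses made
    // before this point (e.g. the caller's last use of the chip being freed)
    // cannot be reordered past the CAS below that publishes the zombie bit.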
    membar_exit();
    bitmap volatile* ptr = &_zombie_chips;
    bitmap ov = *ptr;
    while(1) {
        bitmap nv = ov | to_free;
        bitmap cv = atomic_cas_64(ptr, ov, nv);
        if(cv == ov)
            break;
        ov = cv;
    }
    bitmap was_free = ov;
    (void) was_free; // keep gcc happy

    assert( ! (was_free & to_free));
}
Example #6
void block_bits::recycle() {
    /* recycle the zombies in the block.

       Whatever bits have gone zombie since we last recycled become
       OR-ed into the set of usable bits. We also XOR them atomically back
       into the zombie set to clear them out there. That way we don't
       leak bits if a releasing thread races us and adds more bits to the
       zombie set after we read it.
    */
    bitmap newly_usable = *&_zombie_chips;
    _usable_chips |= newly_usable;
    membar_exit();
    bitmap volatile* ptr = &_zombie_chips;
    bitmap ov = *ptr;
    while(1) {
        bitmap nv = ov ^ newly_usable; // XOR
        bitmap cv = atomic_cas_64(ptr, ov, nv);
        if(cv == ov)
            break;
        ov = cv;
    }
}
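
The XOR in recycle() is what keeps the scheme from leaking bits: it clears exactly the zombies this call consumed, while any bits a racing release_contiguous() published after the initial read survive in _zombie_chips for the next pass. Below is a self-contained C11 model of the two sides; the names are invented for the sketch, which uses atomic_fetch_or/atomic_fetch_xor in place of the CAS loops (the Solaris atomic_ops(3C) family provides an atomic OR but, as far as I know, no atomic XOR, which would explain the explicit loop in the original).

#include <stdatomic.h>
#include <stdint.h>
#include <assert.h>

static _Atomic uint64_t zombie_chips;
static uint64_t usable_chips;	/* only touched by the recycling thread */

/* Publish one freed chip into the zombie set (callable from any thread). */
static void
model_release(unsigned index)
{
	atomic_fetch_or(&zombie_chips, UINT64_C(1) << index);
}

/* Drain the zombies visible at this instant back into the usable set. */
static void
model_recycle(void)
{
	uint64_t newly_usable = atomic_load(&zombie_chips);

	usable_chips |= newly_usable;
	/*
	 * XOR removes exactly the bits consumed above; anything added to
	 * zombie_chips after the load is left in place for the next pass.
	 */
	atomic_fetch_xor(&zombie_chips, newly_usable);
}

int
main(void)
{
	model_release(3);
	model_recycle();	/* consumes bit 3 */
	model_release(5);	/* e.g. a release racing the next recycle */
	assert(usable_chips == (UINT64_C(1) << 3));
	assert(atomic_load(&zombie_chips) == (UINT64_C(1) << 5));
	return (0);
}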
Example #7
/*
 * An interrupt thread is ending a time slice, so compute the interval it
 * ran for and update the statistic for its PIL.
 */
void
cpu_intr_swtch_enter(kthread_id_t t)
{
	uint64_t	interval;
	uint64_t	start;
	cpu_t		*cpu;

	ASSERT((t->t_flag & T_INTR_THREAD) != 0);
	ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

	/*
	 * We could be here with a zero timestamp. This could happen if:
	 * an interrupt thread which no longer has a pinned thread underneath
	 * it (i.e. it blocked at some point in its past) has finished running
	 * its handler. intr_thread() updated the interrupt statistic for its
	 * PIL and zeroed its timestamp. Since there was no pinned thread to
	 * return to, swtch() gets called and we end up here.
	 *
	 * It can also happen if an interrupt thread in intr_thread() calls
	 * preempt. It will have already taken care of updating stats. In
	 * this event, the interrupt thread will be runnable.
	 */
	if (t->t_intr_start) {
		do {
			start = t->t_intr_start;
			interval = CLOCK_TICK_COUNTER() - start;
		} while (atomic_cas_64(&t->t_intr_start, start, 0) != start);
		cpu = CPU;
		if (cpu->cpu_m.divisor > 1)
			interval *= cpu->cpu_m.divisor;
		cpu->cpu_m.intrstat[t->t_pil][0] += interval;

		atomic_add_64((uint64_t *)&cpu->cpu_intracct[cpu->cpu_mstate],
		    interval);
	} else
		ASSERT(t->t_intr == NULL || t->t_state == TS_RUN);
}
Example #8
/* FIXME: check for 64 bit mode */
static inline int64_t
fenced_compare_exchange_strong_64(int64_t *ptr, int64_t expected, int64_t desired)
{
	return ((int64_t)atomic_cas_64((volatile uint64_t *)ptr,
	    (uint64_t)expected, (uint64_t)desired));
}
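
A portable sketch of the same interface using C11 <stdatomic.h>, with sequentially consistent ordering to make the "fenced" in the name explicit. This is one reading of the intent, not the original implementation; note that the pointer type changes to _Atomic int64_t *, as C11 requires.

#include <stdatomic.h>
#include <stdint.h>

/*
 * Returns the value observed at *ptr: 'expected' on success, the
 * conflicting value on failure -- the same convention as atomic_cas_64().
 */
static inline int64_t
fenced_compare_exchange_strong_64_c11(_Atomic int64_t *ptr,
    int64_t expected, int64_t desired)
{
	(void) atomic_compare_exchange_strong_explicit(ptr, &expected,
	    desired, memory_order_seq_cst, memory_order_seq_cst);
	return (expected);
}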
Example #9
/*
 * Thin CAS wrapper; as written it only compiles where T is the 64-bit
 * unsigned type that atomic_cas_64() expects.
 */
template<typename T>
static T cas(volatile T *ptr, T oldval, T newval) {
    return atomic_cas_64(ptr, oldval, newval);
}