Example #1
File: runtime.c  Project: hfeeki/golang
static void
TestAtomic64(void)
{
	uint64 z64, x64;

	z64 = 42;
	x64 = 0;
	PREFETCH(&z64);
	if(runtime·cas64(&z64, &x64, 1))
		runtime·throw("cas64 failed");
	if(x64 != 42)
		runtime·throw("cas64 failed");
	if(!runtime·cas64(&z64, &x64, 1))
		runtime·throw("cas64 failed");
	if(x64 != 42 || z64 != 1)
		runtime·throw("cas64 failed");
	if(runtime·atomicload64(&z64) != 1)
		runtime·throw("load64 failed");
	runtime·atomicstore64(&z64, (1ull<<40)+1);
	if(runtime·atomicload64(&z64) != (1ull<<40)+1)
		runtime·throw("store64 failed");
	if(runtime·xadd64(&z64, (1ull<<40)+1) != (2ull<<40)+2)
		runtime·throw("xadd64 failed");
	if(runtime·atomicload64(&z64) != (2ull<<40)+2)
		runtime·throw("xadd64 failed");
	if(runtime·xchg64(&z64, (3ull<<40)+3) != (2ull<<40)+2)
		runtime·throw("xchg64 failed");
	if(runtime·atomicload64(&z64) != (3ull<<40)+3)
		runtime·throw("xchg64 failed");
}
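A note on the cas64 convention used here: the Go runtime's runtime·cas64 takes a pointer to the expected value, returns a boolean, and writes the value it actually observed back through that pointer when the compare fails (which is why the test above expects x64 to become 42 after the first, failing CAS). The Solaris cas64 in the later examples instead returns the previous contents of the word, and tmpi's cas64 in Example #2 appears to return nonzero on success. A minimal sketch of a retry loop in the Go style, following the xadd64 pattern from Example #5; or64 is a hypothetical name, not a runtime function:

// or64 is a hypothetical helper, shown only to illustrate the retry pattern.
static uint64
or64(uint64 volatile* addr, uint64 bits)
{
	uint64 old;

	old = *addr;
	// runtime·cas64 refreshes 'old' with the observed value on failure,
	// so the loop does not need to reload *addr by hand.
	while(!runtime·cas64(addr, &old, old|bits)) {
		// nothing
	}
	return old|bits;
}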
Example #2
File: rdc_op.c  Project: bringhurst/tmpi
void rdc_dbl_min(void *result, void *source, int count)
{
	double *res=(double *)result;
	double *src=(double *)source;
	union {
		double f;
		long long i;
	} tmp1, tmp2;
	register int i;

	if (sizeof(double)==8) {
		for (i=0; i<count; i++) {
			do {
				tmp1.f=res[i];
				tmp2.f=src[i];
	
				if (tmp2.f>=tmp1.f)
					break;
			} while (!cas64((long long *)&(res[i]), tmp1.i, tmp2.i));
		}
	}
	else {
		tmpi_error(DBG_INTERNAL, "Architecture assumption failed, double size not equal to 8!");
	}
}
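The union above reinterprets each 8-byte double as a long long so it can be handed to cas64, and the inner loop gives up without writing when the source value is not smaller. The same type-punning pattern works for other floating-point reductions; a hedged sketch of an atomic sum, assuming tmpi's cas64 (nonzero on success) and sizeof(double)==8 as checked above (rdc_dbl_sum is a hypothetical name):

/* Hypothetical companion to rdc_dbl_min, for illustration only. */
void rdc_dbl_sum(void *result, void *source, int count)
{
	double *res=(double *)result;
	double *src=(double *)source;
	union {
		double f;
		long long i;
	} tmp1, tmp2;
	register int i;

	for (i=0; i<count; i++) {
		do {
			tmp1.f=res[i];		/* snapshot the current element */
			tmp2.f=tmp1.f+src[i];	/* compute the replacement value */
		} while (!cas64((long long *)&(res[i]), tmp1.i, tmp2.i));
	}
}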
Example #3
/*
 * An interrupt thread is ending a time slice, so compute the interval it
 * ran for and update the statistic for its PIL.
 */
void
cpu_intr_swtch_enter(kthread_id_t t)
{
	uint64_t	interval;
	uint64_t	start;
	cpu_t		*cpu;

	ASSERT((t->t_flag & T_INTR_THREAD) != 0);
	ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

	/*
	 * We could be here with a zero timestamp. This could happen if:
	 * an interrupt thread which no longer has a pinned thread underneath
	 * it (i.e. it blocked at some point in its past) has finished running
	 * its handler. intr_thread() updated the interrupt statistic for its
	 * PIL and zeroed its timestamp. Since there was no pinned thread to
	 * return to, swtch() gets called and we end up here.
	 *
	 * Note that we use atomic ops below (cas64 and atomic_add_64), which
	 * we don't use in the functions above, because we're not called
	 * with interrupts blocked, but the epilog/prolog functions are.
	 */
	if (t->t_intr_start) {
		do {
			start = t->t_intr_start;
			interval = tsc_read() - start;
		} while (cas64(&t->t_intr_start, start, 0) != start);
		cpu = CPU;
		cpu->cpu_m.intrstat[t->t_pil][0] += interval;

		atomic_add_64((uint64_t *)&cpu->cpu_intracct[cpu->cpu_mstate],
		    interval);
	} else
		ASSERT(t->t_intr == NULL);
}
Example #4
/*
 * Allocate an unassigned memnode.
 */
int
mem_node_alloc()
{
	int mnode;
	mnodeset_t newmask, oldmask;

	/*
	 * Find an unused memnode.  Update it atomically to prevent
	 * a first time memnode creation race.
	 */
	for (mnode = 0; mnode < max_mem_nodes; mnode++)
		if (cas32((uint32_t *)&mem_node_config[mnode].exists,
		    0, 1) == 0)
			break;

	if (mnode >= max_mem_nodes)
		panic("Out of free memnodes\n");

	mem_node_config[mnode].physbase = (uint64_t)-1;
	mem_node_config[mnode].physmax = 0;
	atomic_add_16(&num_memnodes, 1);
	do {
		oldmask = memnodes_mask;
		newmask = memnodes_mask | (1ull << mnode);
	} while (cas64(&memnodes_mask, oldmask, newmask) != oldmask);

	return (mnode);
}
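The memnodes_mask update above is the same idiom used again in Examples #10 and #12: snapshot the mask, compute the new value, and retry until cas64 observed exactly that snapshot. A hedged sketch of the idiom factored into a helper; set_mask_bit64 is a hypothetical name, and the mask is assumed to be a 64-bit word, as the cas64 call implies:

/* Hypothetical helper: atomically set one bit in a 64-bit mask.
 * Relies on cas64() returning the previous contents of *maskp. */
static void
set_mask_bit64(uint64_t *maskp, int bit)
{
	uint64_t oldmask, newmask;

	do {
		oldmask = *maskp;
		newmask = oldmask | (1ull << bit);
	} while (cas64(maskp, oldmask, newmask) != oldmask);
}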
Example #5
uint64
runtime·xadd64(uint64 volatile* addr, int64 v)
{
	uint64 old;

	old = *addr;
	while(!runtime·cas64(addr, &old, old+v)) {
		// nothing
	}
	return old+v;
}
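xadd64 is the fetch-and-add counterpart of the cas64 retry loops elsewhere on this page; note that it returns the new value, old+v. The xchg64 exercised in Example #1 can be built on the same primitive. A minimal sketch, for illustration only and not necessarily the runtime's own definition:

// Illustration only: exchange built on runtime·cas64.
uint64
xchg64_sketch(uint64 volatile* addr, uint64 v)
{
	uint64 old;

	old = *addr;	// refreshed by runtime·cas64 on each failed attempt
	while(!runtime·cas64(addr, &old, v)) {
		// nothing
	}
	return old;	// the value that was replaced
}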
Example #6
/*
 * Atomically increment a counter
 */
void
bge_atomic_renounce(uint64_t *count_p, uint64_t n)
{
	uint64_t oldval;
	uint64_t newval;

	/* ATOMICALLY */
	do {
		oldval = *count_p;
		newval = oldval + n;
	} while (cas64(count_p, oldval, newval) != oldval);
}
Example #7
/*
 * An interrupt thread is returning from swtch(). Place a starting timestamp
 * in its thread structure.
 */
void
cpu_intr_swtch_exit(kthread_id_t t)
{
	uint64_t ts;

	ASSERT((t->t_flag & T_INTR_THREAD) != 0);
	ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

	do {
		ts = t->t_intr_start;
	} while (cas64(&t->t_intr_start, ts, tsc_read()) != ts);
}
Example #8
/*
 * Atomically clear bits in a 64-bit word, returning
 * the value it had *before* the bits were cleared.
 */
uint64_t
bge_atomic_clr64(uint64_t *sp, uint64_t bits)
{
	uint64_t oldval;
	uint64_t newval;

	/* ATOMICALLY */
	do {
		oldval = *sp;
		newval = oldval & ~bits;
	} while (cas64(sp, oldval, newval) != oldval);

	return (oldval);
}
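A matching set-bits helper would use the identical retry loop, only with OR in place of AND-NOT. This is a hedged sketch; the name bge_atomic_set64 is hypothetical and not necessarily present in the driver:

/* Hypothetical companion to bge_atomic_clr64: atomically set bits
 * in a 64-bit word, returning the value it had before. */
uint64_t
bge_atomic_set64(uint64_t *sp, uint64_t bits)
{
	uint64_t oldval;
	uint64_t newval;

	/* ATOMICALLY */
	do {
		oldval = *sp;
		newval = oldval | bits;
	} while (cas64(sp, oldval, newval) != oldval);

	return (oldval);
}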
Example #9
/*
 * Atomically claim a slot in a descriptor ring
 */
uint64_t
bge_atomic_claim(uint64_t *count_p, uint64_t limit)
{
	uint64_t oldval;
	uint64_t newval;

	/* ATOMICALLY */
	do {
		oldval = *count_p;
		newval = NEXT(oldval, limit);
	} while (cas64(count_p, oldval, newval) != oldval);

	return (oldval);
}
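NEXT() is not shown in this excerpt; for a descriptor ring it presumably advances the claimed index and wraps at the ring size. A sketch of that assumed behavior, not the driver's actual definition:

/* Assumed behavior of NEXT(): advance a ring index, wrapping to 0 at 'limit'. */
#define	NEXT(index, limit)	(((index) + 1) % (limit))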
Example #10
/*
 * Remove a PFN range from a memnode.  On some platforms,
 * the memnode will be created with physbase at the first
 * allocatable PFN, but later deleted with the MC slice
 * base address converted to a PFN, in which case we need
 * to assume physbase and up.
 */
void
mem_node_del_slice(pfn_t start, pfn_t end)
{
	int mnode;
	pgcnt_t delta_pgcnt, node_size;
	mnodeset_t omask, nmask;

	if (mem_node_physalign) {
		start &= ~(btop(mem_node_physalign) - 1);
		end = roundup(end, btop(mem_node_physalign)) - 1;
	}
	mnode = PFN_2_MEM_NODE(start);

	ASSERT(mnode < max_mem_nodes);
	ASSERT(mem_node_config[mnode].exists == 1);

	delta_pgcnt = end - start;
	node_size = mem_node_config[mnode].physmax -
	    mem_node_config[mnode].physbase;

	if (node_size > delta_pgcnt) {
		/*
		 * Subtract the slice from the memnode.
		 */
		if (start <= mem_node_config[mnode].physbase)
			mem_node_config[mnode].physbase = end + 1;
		ASSERT(end <= mem_node_config[mnode].physmax);
		if (end == mem_node_config[mnode].physmax)
			mem_node_config[mnode].physmax = start - 1;
	} else {

		/*
		 * Let the common lgrp framework know the mnode is
		 * leaving
		 */
		lgrp_config(LGRP_CONFIG_MEM_DEL, mnode,
		    MEM_NODE_2_LGRPHAND(mnode));

		/*
		 * Delete the whole node.
		 */
		ASSERT(MNODE_PGCNT(mnode) == 0);
		do {
			omask = memnodes_mask;
			nmask = omask & ~(1ull << mnode);
		} while (cas64(&memnodes_mask, omask, nmask) != omask);
		atomic_add_16(&num_memnodes, -1);
		mem_node_config[mnode].exists = 0;
	}
}
Example #11
/*
 * Atomically decrement a counter, but only if it will remain
 * strictly positive (greater than zero) afterwards.  We return
 * the decremented value if so, otherwise zero (in which case
 * the counter is unchanged).
 *
 * This is used for keeping track of available resources such
 * as transmit ring slots ...
 */
uint64_t
bge_atomic_reserve(uint64_t *count_p, uint64_t n)
{
	uint64_t oldval;
	uint64_t newval;

	/* ATOMICALLY */
	do {
		oldval = *count_p;
		newval = oldval - n;
		if (oldval <= n)
			return (0);		/* no resources left	*/
	} while (cas64(count_p, oldval, newval) != oldval);

	return (newval);
}
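bge_atomic_reserve pairs naturally with bge_atomic_renounce from Example #6: a caller takes slots from a free-slot count up front and hands them back if it cannot use them. A hedged usage sketch; try_take_slots and its parameters are illustrative names, not taken from the driver:

/* Illustrative only: reserve 'want' slots from a free-slot counter,
 * do some work, then return them via bge_atomic_renounce. */
static int
try_take_slots(uint64_t *free_p, uint64_t want)
{
	if (bge_atomic_reserve(free_p, want) == 0)
		return (0);			/* not enough slots available	*/

	/* ... use the reserved slots ... */

	bge_atomic_renounce(free_p, want);	/* give them back		*/
	return (1);
}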
Example #12
void
mem_node_add_slice(pfn_t start, pfn_t end)
{
	int mnode;
	mnodeset_t newmask, oldmask;

	/*
	 * DR will pass us the first pfn that is allocatable.
	 * We need to round down to get the real start of
	 * the slice.
	 */
	if (mem_node_physalign) {
		start &= ~(btop(mem_node_physalign) - 1);
		end = roundup(end, btop(mem_node_physalign)) - 1;
	}

	mnode = PFN_2_MEM_NODE(start);
	ASSERT(mnode >= 0 && mnode < max_mem_nodes);

	if (cas32((uint32_t *)&mem_node_config[mnode].exists, 0, 1)) {
		/*
		 * Add slice to existing node.
		 */
		if (start < mem_node_config[mnode].physbase)
			mem_node_config[mnode].physbase = start;
		if (end > mem_node_config[mnode].physmax)
			mem_node_config[mnode].physmax = end;
	} else {
		mem_node_config[mnode].physbase = start;
		mem_node_config[mnode].physmax = end;
		atomic_add_16(&num_memnodes, 1);
		do {
			oldmask = memnodes_mask;
			newmask = memnodes_mask | (1ull << mnode);
		} while (cas64(&memnodes_mask, oldmask, newmask) != oldmask);
	}

	/*
	 * Inform the common lgrp framework about the new memory
	 */
	lgrp_config(LGRP_CONFIG_MEM_ADD, mnode, MEM_NODE_2_LGRPHAND(mnode));
}