int
gemini_ipm_produce(const void *adescp, size_t ndesc)
{
	const ipm_desc_t *descp = adescp;
	gemini_ipm_softc_t *sc = gemini_ipm_sc;
	ipmqindex_t ix_read;
	ipmqindex_t ix_write;

	KASSERT(ndesc == 1);			/* XXX TMP */

	DPRINTFN(2, ("%s:%d: %p %ld, tag %d\n",
		__FUNCTION__, __LINE__, descp, ndesc, descp->tag));
	ix_read = sc->sc_txqueue->ix_read;
	ix_write = sc->sc_txqueue->ix_write;
	if (ipmqisfull(ix_read, ix_write)) {
		/* we expect this to "never" happen; check your quotas */
		panic("%s: queue full\n", device_xname(sc->sc_dev));
	}
	gemini_ipm_desc_write(&sc->sc_txqueue->ipm_desc[ix_write], descp);
	sc->sc_txqueue->ix_write = ipmqnext(ix_write);
	sc->sc_txcount++; 

	ipi_send(); 

	gemini_ipm_count_txdone(sc); 

	return 0;
} 
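
The producer above treats sc_txqueue as a fixed-size ring: ipmqisfull() refuses to overwrite unread descriptors and ipmqnext() advances the write index with wrap-around. As a rough sketch of how such helpers are typically defined (the actual NetBSD gemini_ipm definitions may differ; GIPM_QDEPTH is a made-up name for the queue depth):

/*
 * Hypothetical sketch of the ring-index helpers used above; the real
 * NetBSD gemini_ipm definitions may differ.  GIPM_QDEPTH is a made-up
 * name for the fixed queue depth.
 */
#define GIPM_QDEPTH 32			/* assumed queue depth */

typedef unsigned int ipmqindex_t;

static inline ipmqindex_t
ipmqnext(ipmqindex_t ix)
{
	/* advance one slot, wrapping at the end of the ring */
	return (ix + 1) % GIPM_QDEPTH;
}

static inline int
ipmqisfull(ipmqindex_t ix_read, ipmqindex_t ix_write)
{
	/* full when the writer would catch up to the reader */
	return ipmqnext(ix_write) == ix_read;
}

static inline int
ipmqisempty(ipmqindex_t ix_read, ipmqindex_t ix_write)
{
	/* empty when both indices coincide */
	return ix_read == ix_write;
}

With this convention one slot is always left unused, so a full queue can be distinguished from an empty one without a separate count.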
Example #2
File: smp.c  Project: 1800alex/linux
static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
	BUG_ON(dest_cpu == NO_PROC_ID);

	ipi_send(dest_cpu, op);
}
Example #3
static void
send_IPI_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	int cpu;

	for_each_cpu(cpu, mask)
		ipi_send(cpu, op);
}
Example #4
File: smp.c  Project: E-LLP/n900
static void
send_IPI_mask(cpumask_t mask, enum ipi_message_type op)
{
	int cpu;

	for_each_cpu_mask(cpu, mask)
		ipi_send(cpu, op);
}
Example #5
static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
	if (dest_cpu == NO_PROC_ID) {
		BUG();
		return;
	}

	ipi_send(dest_cpu, op);
}
Example #6
static int
ia64_highfp_ipi(struct pcpu *cpu)
{
	int error;

	ipi_send(cpu, ia64_ipi_highfp);
	error = msleep_spin(&cpu->pc_fpcurthread, &ia64_highfp_mtx,
	    "High FP", 0);
	return (error);
}
Example #7
void
ipi_broadcast(int code)
{
	unsigned i;
	struct cpu *c;

	for (i=0; i < cpuarray_num(&allcpus); i++) {
		c = cpuarray_get(&allcpus, i);
		if (c != curcpu->c_self) {
			ipi_send(c, code);
		}
	}
}
Example #8
/*
 * Thread migration.
 *
 * This is also called periodically from hardclock(). If the current
 * CPU is busy and other CPUs are idle, or less busy, it should move
 * threads across to those other CPUs.
 *
 * Migrating threads isn't free because of cache affinity; a thread's
 * working cache set will end up having to be moved to the other CPU,
 * which is fairly slow. The tradeoff between this performance loss
 * and the performance loss due to underutilization of some CPUs is
 * something that needs to be tuned and probably is workload-specific.
 *
 * For here and now, because we know we're running on System/161 and
 * System/161 does not (yet) model such cache effects, we'll be very
 * aggressive.
 */
void
thread_consider_migration(void)
{
	unsigned my_count, total_count, one_share, to_send;
	unsigned i, numcpus;
	struct cpu *c;
	struct threadlist victims;
	struct thread *t;

	my_count = total_count = 0;
	numcpus = cpuarray_num(&allcpus);
	for (i=0; i<numcpus; i++) {
		c = cpuarray_get(&allcpus, i);
		spinlock_acquire(&c->c_runqueue_lock);
		total_count += c->c_runqueue.tl_count;
		if (c == curcpu->c_self) {
			my_count = c->c_runqueue.tl_count;
		}
		spinlock_release(&c->c_runqueue_lock);
	}

	one_share = DIVROUNDUP(total_count, numcpus);
	if (my_count < one_share) {
		return;
	}

	to_send = my_count - one_share;
	threadlist_init(&victims);
	spinlock_acquire(&curcpu->c_runqueue_lock);
	for (i=0; i<to_send; i++) {
		t = threadlist_remtail(&curcpu->c_runqueue);
		threadlist_addhead(&victims, t);
	}
	spinlock_release(&curcpu->c_runqueue_lock);

	for (i=0; i < numcpus && to_send > 0; i++) {
		c = cpuarray_get(&allcpus, i);
		if (c == curcpu->c_self) {
			continue;
		}
		spinlock_acquire(&c->c_runqueue_lock);
		while (c->c_runqueue.tl_count < one_share && to_send > 0) {
			t = threadlist_remhead(&victims);
			/*
			 * Ordinarily, curthread will not appear on
			 * the run queue. However, it can under the
			 * following circumstances:
			 *   - it went to sleep;
			 *   - the processor became idle, so it
			 *     remained curthread;
			 *   - it was reawakened, so it was put on the
			 *     run queue;
			 *   - and the processor hasn't fully unidled
			 *     yet, so all these things are still true.
			 *
			 * If the timer interrupt happens at (almost)
			 * exactly the proper moment, we can come here
			 * while things are in this state and see
			 * curthread. However, *migrating* curthread
			 * can cause bad things to happen (Exercise:
			 * Why? And what?) so shuffle it to the end of
			 * the list and decrement to_send in order to
			 * skip it. Then it goes back on our own run
			 * queue below.
			 */
			if (t == curthread) {
				threadlist_addtail(&victims, t);
				to_send--;
				continue;
			}

			t->t_cpu = c;
			threadlist_addtail(&c->c_runqueue, t);
			DEBUG(DB_THREADS,
			      "Migrated thread %s: cpu %u -> %u",
			      t->t_name, curcpu->c_number, c->c_number);
			to_send--;
			if (c->c_isidle) {
				/*
				 * Other processor is idle; send
				 * interrupt to make sure it unidles.
				 */
				ipi_send(c, IPI_UNIDLE);
			}
		}
		spinlock_release(&c->c_runqueue_lock);
	}

	/*
	 * Because the code above isn't atomic, the thread counts may have
	 * changed while we were working and we may end up with leftovers.
	 * Don't panic; just put them back on our own run queue.
	 */
	if (!threadlist_isempty(&victims)) {
		spinlock_acquire(&curcpu->c_runqueue_lock);
		while ((t = threadlist_remhead(&victims)) != NULL) {
			threadlist_addtail(&curcpu->c_runqueue, t);
		}
		spinlock_release(&curcpu->c_runqueue_lock);
	}

	KASSERT(threadlist_isempty(&victims));
	threadlist_cleanup(&victims);
}
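
The balancing target in the loop above is one_share = DIVROUNDUP(total_count, numcpus): a CPU holding more than its rounded-up fair share offers the surplus (to_send) to less loaded CPUs. A small stand-alone sketch of that arithmetic, with made-up counts:

/*
 * Stand-alone illustration of the load-sharing arithmetic above.
 * DIVROUNDUP is the usual ceiling division; the counts are made up.
 */
#include <stdio.h>

#define DIVROUNDUP(a, b) (((a) + (b) - 1) / (b))

int
main(void)
{
	unsigned my_count = 7;		/* threads on this CPU's run queue */
	unsigned total_count = 12;	/* threads on all run queues */
	unsigned numcpus = 4;

	unsigned one_share = DIVROUNDUP(total_count, numcpus);	/* 3 */
	if (my_count < one_share) {
		printf("below fair share; no migration\n");
		return 0;
	}

	unsigned to_send = my_count - one_share;		/* 4 */
	printf("fair share %u, migrating up to %u threads\n",
	    one_share, to_send);
	return 0;
}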
Example #9
/*
 * ipi_send_mask
 *	Send an IPI to each cpu in mask.
 */
static inline void ipi_send_mask(unsigned int op, const struct cpumask mask)
{
	int cpu;
	for_each_cpu_mask(cpu, mask) {
		ipi_send(cpu, op);
	}
}
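
Most of the examples above share one dispatch pattern: walk the set of target CPUs and call ipi_send(cpu, op) once per CPU. A toy user-space model of that loop, using a plain bitmask instead of the kernel cpumask API (all names here are hypothetical):

/*
 * Toy, user-space model of the dispatch pattern shared by the examples
 * above: walk a CPU mask and "send" an IPI to each set bit.  Every name
 * here is hypothetical; this is not the kernel cpumask API.
 */
#include <stdio.h>

enum ipi_message_type { IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_UNIDLE };

static void
ipi_send(int cpu, enum ipi_message_type op)
{
	printf("IPI %d -> cpu %d\n", op, cpu);
}

static void
ipi_send_mask(unsigned long mask, enum ipi_message_type op)
{
	/* visit each set bit, lowest CPU number first */
	for (int cpu = 0; mask != 0; cpu++, mask >>= 1)
		if (mask & 1)
			ipi_send(cpu, op);
}

int
main(void)
{
	ipi_send_mask(0x0d, IPI_RESCHEDULE);	/* cpus 0, 2, 3 */
	return 0;
}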