Example 1
/*
 * npfctl_stats: export collected statistics.
 */
static int
npfctl_stats(void *data)
{
	uint64_t *fullst, *uptr = *(uint64_t **)data;
	int error;

	fullst = kmem_zalloc(NPF_STATS_SIZE, KM_SLEEP);
	percpu_foreach(npf_stats_percpu, npf_stats_collect, fullst);
	error = copyout(fullst, uptr, NPF_STATS_SIZE);
	kmem_free(fullst, NPF_STATS_SIZE);
	return error;
}
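
The collector handed to percpu_foreach() is not shown above. As a rough sketch (the real npf_stats_collect lives elsewhere in npf.c; the exact accumulation shown here is an assumption), a callback matching percpu_foreach()'s (void *, void *, struct cpu_info *) signature would sum each CPU's private counter block into the shared buffer:

/*
 * Sketch of a per-CPU collector: the first argument is this CPU's
 * private counter block, the second is the shared buffer passed as
 * the third argument of percpu_foreach().  The counter count is
 * assumed to be NPF_STATS_SIZE / sizeof(uint64_t).
 */
static void
npf_stats_collect(void *mem, void *arg, struct cpu_info *ci)
{
	const uint64_t *stats = mem;
	uint64_t *fullst = arg;
	unsigned i;

	for (i = 0; i < NPF_STATS_SIZE / sizeof(uint64_t); i++)
		fullst[i] += stats[i];
}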
Example 2
void
pic_disestablish_source(struct intrsource *is)
{
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	KASSERT(is == pic->pic_sources[irq]);

	(*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
	pic->pic_sources[irq] = NULL;
	pic__iplsources[pic_ipl_offset[is->is_ipl] + is->is_iplidx] = NULL;
	/*
	 * Now detach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_deattach, is);

	kmem_free(is, sizeof(*is));
}
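
Here percpu_foreach() runs pic_percpu_evcnt_deattach() once per CPU to detach that CPU's event counter for the source being removed. A minimal sketch, assuming the per-CPU state is a struct pic_percpu holding an array of evcnts indexed by IRQ (the pcpu_evs field name is an assumption):

/*
 * Sketch of the per-CPU detach callback: v0 is this CPU's private
 * pic_percpu block, v1 is the intrsource passed to percpu_foreach().
 */
static void
pic_percpu_evcnt_deattach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	evcnt_detach(&pcpu->pcpu_evs[is->is_irq]);
}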
Example 3
void *
pic_establish_intr(struct pic_softc *pic, int irq, int ipl, int type,
	int (*func)(void *), void *arg)
{
	struct intrsource *is;
	int off, nipl;

	if (pic->pic_sources[irq]) {
		printf("pic_establish_intr: pic %s irq %d already present\n",
		    pic->pic_name, irq);
		return NULL;
	}

	is = kmem_zalloc(sizeof(*is), KM_SLEEP);
	if (is == NULL)
		return NULL;

	is->is_pic = pic;
	is->is_irq = irq;
	is->is_ipl = ipl;
	is->is_type = type;
	is->is_func = func;
	is->is_arg = arg;

	if (pic->pic_ops->pic_source_name)
		(*pic->pic_ops->pic_source_name)(pic, irq, is->is_source,
		    sizeof(is->is_source));
	else
		snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq);

	/*
	 * Now attach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_attach, is);

	pic->pic_sources[irq] = is;

	/*
	 * First try to use an existing slot which is empty.
	 */
	for (off = pic_ipl_offset[ipl]; off < pic_ipl_offset[ipl+1]; off++) {
		if (pic__iplsources[off] == NULL) {
			is->is_iplidx = off - pic_ipl_offset[ipl];
			pic__iplsources[off] = is;
			return is;
		}
	}

	/*
	 * Move up all the sources by one.
	 */
	if (ipl < NIPL) {
		off = pic_ipl_offset[ipl+1];
		memmove(&pic__iplsources[off+1], &pic__iplsources[off],
		    sizeof(pic__iplsources[0]) * (pic_ipl_offset[NIPL] - off));
	}

	/*
	 * Advance the offset of all IPLs higher than this.  Include an
	 * extra one as well.  Thus the number of sources per ipl is
	 * pic_ipl_offset[ipl+1] - pic_ipl_offset[ipl].
	 */
	for (nipl = ipl + 1; nipl <= NIPL; nipl++)
		pic_ipl_offset[nipl]++;

	/*
	 * Insert into the previously made position at the end of this IPL's
	 * sources.
	 */
	off = pic_ipl_offset[ipl + 1] - 1;
	is->is_iplidx = off - pic_ipl_offset[ipl];
	pic__iplsources[off] = is;

	(*pic->pic_ops->pic_establish_irq)(pic, is);

	(*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f,
	    __BIT(is->is_irq & 0x1f));
	
	/* We're done. */
	return is;
}
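
The matching attach side works the same way: percpu_foreach() visits every CPU's pic_percpu block and registers an interrupt event counter for the new source. A sketch under the same assumptions (pcpu_evs array plus a per-CPU group name in pcpu_name; the real callback may differ):

/*
 * Sketch of the per-CPU attach callback: registers a dynamic
 * interrupt event counter for this source on this CPU.
 */
static void
pic_percpu_evcnt_attach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	evcnt_attach_dynamic(&pcpu->pcpu_evs[is->is_irq], EVCNT_TYPE_INTR,
	    NULL, pcpu->pcpu_name, is->is_source);
}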
Example 4
void
pic_add(struct pic_softc *pic, int irqbase)
{
	int slot, maybe_slot = -1;

	KASSERT(strlen(pic->pic_name) > 0);

	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const xpic = pic_list[slot];
		if (xpic == NULL) {
			if (maybe_slot < 0)
				maybe_slot = slot;
			if (irqbase < 0)
				break;
			continue;
		}
		if (irqbase < 0 || xpic->pic_irqbase < 0)
			continue;
		if (irqbase >= xpic->pic_irqbase + xpic->pic_maxsources)
			continue;
		if (irqbase + pic->pic_maxsources <= xpic->pic_irqbase)
			continue;
		panic("pic_add: pic %s (%zu sources @ irq %u) conflicts"
		    " with pic %s (%zu sources @ irq %u)",
		    pic->pic_name, pic->pic_maxsources, irqbase,
		    xpic->pic_name, xpic->pic_maxsources, xpic->pic_irqbase);
	}
	slot = maybe_slot;
#if 0
	printf("%s: pic_sourcebase=%zu pic_maxsources=%zu\n",
	    pic->pic_name, pic_sourcebase, pic->pic_maxsources);
#endif
	KASSERTMSG(pic->pic_maxsources <= PIC_MAXSOURCES, "%zu",
	    pic->pic_maxsources);
	KASSERT(pic_sourcebase + pic->pic_maxsources <= PIC_MAXMAXSOURCES);

	/*
	 * Allocate a pointer to each cpu's evcnts and then, for each cpu,
	 * allocate its evcnts and then attach an evcnt for each pin.
	 * We can't allocate the evcnt structures directly since
	 * percpu will move the contents of percpu memory around and 
	 * corrupt the pointers in the evcnts themselves.  Remember, any
	 * problem can be solved with sufficient indirection.
	 */
	pic->pic_percpu = percpu_alloc(sizeof(struct pic_percpu));
	KASSERT(pic->pic_percpu != NULL);

	/*
	 * Now allocate the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_allocate, pic);

	pic->pic_sources = &pic_sources[pic_sourcebase];
	pic->pic_irqbase = irqbase;
	pic_sourcebase += pic->pic_maxsources;
	pic->pic_id = slot;
#ifdef __HAVE_PIC_SET_PRIORITY
	KASSERT((slot == 0) == (pic->pic_ops->pic_set_priority != NULL));
#endif
#ifdef MULTIPROCESSOR
	KASSERT((slot == 0) == (pic->pic_ops->pic_ipi_send != NULL));
#endif
	pic_list[slot] = pic;
}
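
The comment above explains the extra indirection: percpu(9) may relocate the per-CPU area, so only a small struct pic_percpu lives in percpu storage while the evcnt array is a separate kmem allocation that never moves. A sketch of what pic_percpu_allocate() could do under that assumption (struct layout and field names are assumed):

/*
 * Sketch of the per-CPU allocation callback: v0 is this CPU's
 * pic_percpu block inside percpu storage, v1 is the pic itself.
 * Only a pointer is kept in percpu memory; the evcnt array is
 * allocated separately so percpu relocation cannot corrupt it.
 */
static void
pic_percpu_allocate(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct pic_softc * const pic = v1;

	pcpu->pcpu_evs = kmem_zalloc(
	    pic->pic_maxsources * sizeof(pcpu->pcpu_evs[0]), KM_SLEEP);
}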
Example 5
static void
percpu_zero(percpu_t *pc, size_t sz)
{

	percpu_foreach(pc, percpu_zero_cb, (void *)(uintptr_t)sz);
}
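
percpu_zero_cb() does the actual work on each CPU's area; the size is smuggled through percpu_foreach()'s opaque argument as a uintptr_t. A plausible sketch:

/*
 * Sketch of the zeroing callback: vp is this CPU's private area,
 * vsz carries the size cast back from the opaque argument.
 */
static void
percpu_zero_cb(void *vp, void *vsz, struct cpu_info *ci)
{
	const size_t sz = (uintptr_t)vsz;

	memset(vp, 0, sz);
}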