Example #1
/*
 * Called on a per-cpu basis
 */
void
initclocks_pcpu(void)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	if (gd->gd_cpuid == 0) {
	    gd->gd_time_seconds = 1;
	    gd->gd_cpuclock_base = sys_cputimer->count();
	} else {
	    /* XXX */
	    gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
	    gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
	}

	systimer_intr_enable();

#ifdef IFPOLL_ENABLE
	ifpoll_init_pcpu(gd->gd_cpuid);
#endif

	/*
	 * Use a non-queued periodic systimer to prevent multiple ticks from
	 * building up if the sysclock jumps forward (8254 gets reset).  The
	 * sysclock will never jump backwards.  Our time sync is based on
	 * the actual sysclock, not the ticks count.
	 */
	systimer_init_periodic_nq(&gd->gd_hardclock, hardclock, NULL, hz);
	systimer_init_periodic_nq(&gd->gd_statclock, statclock, NULL, stathz);
	/* XXX correct the frequency for scheduler / estcpu tests */
	systimer_init_periodic_nq(&gd->gd_schedclock, schedclock,
				  NULL, ESTCPUFREQ);
	crit_exit();
}
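
Each example in this collection follows the same registration pattern: a struct systimer embedded in per-cpu or per-context state, a callback, an opaque data pointer, and a frequency in Hz. The sketch below distills that pattern. It is a minimal sketch, not DragonFly source: example_timer, example_tick, and example_start are hypothetical names, and the three-argument systimer_func_t signature (systimer_t, int, struct intrframe *) is assumed to match the hardclock/statclock handlers registered above.

#include <sys/systimer.h>

static struct systimer example_timer;	/* hypothetical; real code keeps one per cpu */

/*
 * Hypothetical callback.  Assumed systimer_func_t signature; runs from
 * the systimer interrupt on the cpu that registered it, so it must not
 * block.
 */
static void
example_tick(systimer_t info, int in_ipi, struct intrframe *frame)
{
	/* periodic work goes here */
}

static void
example_start(void)
{
	/* Non-queued periodic timer: fire example_tick 10 times per second. */
	systimer_init_periodic_nq(&example_timer, example_tick, NULL, 10);
}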
Example #2
static void
poll_comm_start(int cpuid)
{
	struct poll_comm *comm = poll_common[cpuid];
	systimer_func_t func;

	/*
	 * Initialize systimer
	 */
	if (cpuid == 0)
		func = poll_comm_systimer0;
	else
		func = poll_comm_systimer;
	systimer_init_periodic_nq(&comm->pollclock, func, comm, 1);
}
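
The pollclock here is registered at 1 Hz, which reads like a placeholder rate. If the effective frequency is decided later (e.g. from a pollhz setting), a non-queued periodic systimer can be retuned in place. The sketch below assumes the systimer_adjust_periodic() primitive is available; poll_comm_set_pollhz() is a hypothetical wrapper, not code from the source above.

/*
 * Hypothetical wrapper: retune an already-registered periodic systimer
 * once the real poll frequency is known.
 */
static void
poll_comm_set_pollhz(struct poll_comm *comm, int pollhz)
{
	systimer_adjust_periodic(&comm->pollclock, pollhz);
}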
Example #3
/*
 * Initialize per-cpu polling(4) context.  Called from kern_clock.c.
 */
void
init_device_poll_pcpu(int cpuid)
{
	struct pollctx *pctx;
	char cpuid_str[3];

	if (cpuid >= POLLCTX_MAX)
		return;

	if ((CPUMASK(cpuid) & poll_cpumask0) == 0)
		return;

	if (poll_burst_max < MIN_POLL_BURST_MAX)
		poll_burst_max = MIN_POLL_BURST_MAX;
	else if (poll_burst_max > MAX_POLL_BURST_MAX)
		poll_burst_max = MAX_POLL_BURST_MAX;

	if (poll_each_burst > poll_burst_max)
		poll_each_burst = poll_burst_max;

	poll_cpumask |= CPUMASK(cpuid);

	pctx = kmalloc(sizeof(*pctx), M_DEVBUF, M_WAITOK | M_ZERO);

	pctx->poll_each_burst = poll_each_burst;
	pctx->poll_burst_max = poll_burst_max;
	pctx->user_frac = 50;
	pctx->reg_frac = 20;
	pctx->polling_enabled = polling_enabled;
	pctx->pollhz = pollhz;
	pctx->poll_cpuid = cpuid;
	poll_reset_state(pctx);

	netmsg_init(&pctx->poll_netmsg, NULL, &netisr_adone_rport,
		    0, netisr_poll);
#ifdef INVARIANTS
	pctx->poll_netmsg.lmsg.u.ms_resultp = pctx;
#endif

	netmsg_init(&pctx->poll_more_netmsg, NULL, &netisr_adone_rport,
		    0, netisr_pollmore);
#ifdef INVARIANTS
	pctx->poll_more_netmsg.lmsg.u.ms_resultp = pctx;
#endif

	KASSERT(cpuid < POLLCTX_MAX, ("cpu id must be < %d", POLLCTX_MAX));
	poll_context[cpuid] = pctx;

	if (poll_defcpu < 0) {
		poll_defcpu = cpuid;

		/*
		 * Initialize global sysctl nodes, for compat
		 */
		poll_add_sysctl(NULL, SYSCTL_STATIC_CHILDREN(_kern_polling),
				pctx);
	}

	/*
	 * Initialize per-cpu sysctl nodes
	 */
	ksnprintf(cpuid_str, sizeof(cpuid_str), "%d", pctx->poll_cpuid);

	sysctl_ctx_init(&pctx->poll_sysctl_ctx);
	pctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&pctx->poll_sysctl_ctx,
				 SYSCTL_STATIC_CHILDREN(_kern_polling),
				 OID_AUTO, cpuid_str, CTLFLAG_RD, 0, "");
	poll_add_sysctl(&pctx->poll_sysctl_ctx,
			SYSCTL_CHILDREN(pctx->poll_sysctl_tree), pctx);

	/*
	 * Initialize systimer
	 */
	systimer_init_periodic_nq(&pctx->pollclock, pollclock, pctx, 1);
}
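
Nothing in this example tears the context back down. Below is a hedged sketch of the reverse path: pctx_fini() is a hypothetical name, while systimer_del(), sysctl_ctx_free(), and kfree() are assumed to be the matching primitives for undoing the systimer registration, the sysctl context, and the kmalloc() allocation made above.

/*
 * Hypothetical teardown mirroring init_device_poll_pcpu().  Each call
 * undoes one piece of the setup above.
 */
static void
pctx_fini(struct pollctx *pctx)
{
	systimer_del(&pctx->pollclock);		 /* stop the periodic timer */
	sysctl_ctx_free(&pctx->poll_sysctl_ctx); /* drop the per-cpu sysctl subtree */
	kfree(pctx, M_DEVBUF);			 /* release the context itself */
}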
Example #4
void *
register_int(int intr, inthand2_t *handler, void *arg, const char *name,
		struct lwkt_serialize *serializer, int intr_flags, int cpuid)
{
    struct intr_info *info;
    struct intrec **list;
    intrec_t rec;
    int orig_cpuid;

    KKASSERT(cpuid >= 0 && cpuid < ncpus);

    if (intr < 0 || intr >= MAX_INTS)
	panic("register_int: bad intr %d", intr);
    if (name == NULL)
	name = "???";
    info = &intr_info_ary[cpuid][intr];

    /*
     * Construct an interrupt handler record
     */
    rec = kmalloc(sizeof(struct intrec), M_DEVBUF, M_INTWAIT);
    rec->name = kmalloc(strlen(name) + 1, M_DEVBUF, M_INTWAIT);
    strcpy(rec->name, name);

    rec->info = info;
    rec->handler = handler;
    rec->argument = arg;
    rec->intr = intr;
    rec->intr_flags = intr_flags;
    rec->next = NULL;
    rec->serializer = serializer;

    int_moveto_destcpu(&orig_cpuid, cpuid);

    /*
     * Create an emergency polling thread and set up a systimer to wake
     * it up.
     */
    if (emergency_intr_thread[cpuid].td_kstack == NULL) {
	lwkt_create(ithread_emergency, NULL, NULL,
		    &emergency_intr_thread[cpuid],
		    TDF_NOSTART | TDF_INTTHREAD, cpuid, "ithreadE %d",
		    cpuid);
	systimer_init_periodic_nq(&emergency_intr_timer[cpuid],
		    emergency_intr_timer_callback,
		    &emergency_intr_thread[cpuid],
		    (emergency_intr_enable ? emergency_intr_freq : 1));
    }

    /*
     * Create an interrupt thread if necessary, leave it in an unscheduled
     * state.
     */
    if (info->i_state == ISTATE_NOTHREAD) {
	info->i_state = ISTATE_NORMAL;
	lwkt_create(ithread_handler, (void *)(intptr_t)intr, NULL,
		    &info->i_thread, TDF_NOSTART | TDF_INTTHREAD, cpuid,
		    "ithread%d %d", intr, cpuid);
	if (intr >= FIRST_SOFTINT)
	    lwkt_setpri(&info->i_thread, TDPRI_SOFT_NORM);
	else
	    lwkt_setpri(&info->i_thread, TDPRI_INT_MED);
	info->i_thread.td_preemptable = lwkt_preempt;
    }

    list = &info->i_reclist;

    /*
     * Keep track of how many fast and slow interrupts we have.
     * Set i_mplock_required if any handler in the chain requires
     * the MP lock to operate.
     */
    if ((intr_flags & INTR_MPSAFE) == 0)
	info->i_mplock_required = 1;
    if (intr_flags & INTR_CLOCK)
	++info->i_fast;
    else
	++info->i_slow;

    /*
     * Enable random number generation keying off of this interrupt.
     */
    if ((intr_flags & INTR_NOENTROPY) == 0 && info->i_random.sc_enabled == 0) {
	info->i_random.sc_enabled = 1;
	info->i_random.sc_intr = intr;
    }

    /*
     * Add the record to the interrupt list.
     */
    crit_enter();
    while (*list != NULL)
	list = &(*list)->next;
    *list = rec;
    crit_exit();

    /*
     * Update max_installed_hard_intr to make the emergency intr poll
     * a bit more efficient.
     */
    if (intr < FIRST_SOFTINT) {
	if (max_installed_hard_intr[cpuid] <= intr)
	    max_installed_hard_intr[cpuid] = intr + 1;
    }

    if (intr >= FIRST_SOFTINT)
	swi_info_ary[intr - FIRST_SOFTINT] = info;

    /*
     * Setup the machine level interrupt vector
     */
    if (intr < FIRST_SOFTINT && info->i_slow + info->i_fast == 1)
	machintr_intr_setup(intr, intr_flags);

    int_moveto_origcpu(orig_cpuid, cpuid);

    return(rec);
}
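
For context, here is a hedged sketch of how a driver might call register_int() during attach. my_softc, my_intr, and my_attach are hypothetical names; register_int(), the INTR_MPSAFE flag, and the returned cookie (which would later be handed back for unregistration) come from the code above, and mycpuid is assumed to name the current cpu.

extern inthand2_t my_intr;		/* hypothetical device interrupt handler */

struct my_softc {
	void	*intr_cookie;		/* cookie returned by register_int() */
	/* ... device state ... */
};

static void
my_attach(struct my_softc *sc, int irq)
{
	/*
	 * Register an MP-safe handler on the current cpu; the serializer
	 * argument is left NULL here for simplicity.
	 */
	sc->intr_cookie = register_int(irq, my_intr, sc, "mydev",
				       NULL, INTR_MPSAFE, mycpuid);
}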