Esempio n. 1
0
/*
 * Cyclic test 003: arm four cyclics with distinct periods, let them run
 * for a while, then tear them all down.
 *
 * NOTE(review): `error` is never modified, so this test always reports
 * "passed" -- confirm whether the handler is expected to flag failures.
 * NOTE(review): cyh_level is not initialized here; if this port's
 * cyc_handler_t carries that field (as other cyclic users do), an
 * indeterminate value reaches cyclic_add() -- verify.
 */
static void
cyclic_test_003(void)
{
	static const long long intervals[4] = {
		200000000, 400000000, 1000000000, 1300000000
	};
	cyclic_id_t ids[4];
	cyc_handler_t handler;
	cyc_time_t firetime;
	int error = 0;
	int i;

	printf("%s: starting\n", __func__);

	handler.cyh_func = (cyc_func_t) cyclic_test_003_func;
	firetime.cyt_when = 0;	/* fire as soon as possible */

	nanotime(&test_003_start);

	/* cpu_lock must be held across cyclic_add()/cyclic_remove(). */
	mutex_enter(&cpu_lock);
	for (i = 0; i < 4; i++) {
		firetime.cyt_interval = intervals[i];
		handler.cyh_arg = (void *)(unsigned long)i;
		ids[i] = cyclic_add(&handler, &firetime);
	}
	mutex_exit(&cpu_lock);

	/* Let the cyclics fire (presumably ~1.2s if DELAY() is in us). */
	DELAY(1200000);

	mutex_enter(&cpu_lock);
	for (i = 0; i < 4; i++)
		cyclic_remove(ids[i]);
	mutex_exit(&cpu_lock);

	printf("%s: %s\n", __func__, error == 0 ? "passed" : "failed");
}
Esempio n. 2
0
/*
 * DTrace probe-enable callback: arm the cyclic that drives this profile
 * probe.  Tick probes get a plain high-level cyclic; profile probes get
 * an omni-present (per-CPU) cyclic.  Caller must hold cpu_lock.
 */
/*ARGSUSED*/
static void
profile_enable(void *arg, dtrace_id_t id, void *parg)
{
	profile_probe_t *prof = parg;
	cyc_omni_handler_t omni;
	cyc_handler_t hdlr;
	cyc_time_t when;

	ASSERT(prof->prof_interval != 0);
	ASSERT(MUTEX_HELD(&cpu_lock));

	if (prof->prof_kind == PROF_TICK) {
		/* Single cyclic, first firing one interval from now. */
		hdlr.cyh_func = profile_tick;
		hdlr.cyh_arg = prof;
		hdlr.cyh_level = CY_HIGH_LEVEL;

		when.cyt_interval = prof->prof_interval;
		when.cyt_when = dtrace_gethrtime() + when.cyt_interval;

		prof->prof_cyclic = cyclic_add(&hdlr, &when);
	} else {
		ASSERT(prof->prof_kind == PROF_PROFILE);

		/* Omni cyclic: per-CPU setup happens in the online hook. */
		omni.cyo_online = profile_online;
		omni.cyo_offline = profile_offline;
		omni.cyo_arg = prof;

		prof->prof_cyclic = cyclic_add_omni(&omni);
	}
}
Esempio n. 3
0
/*
 * Cyclic test 001: arm a single once-per-second cyclic, let it run
 * briefly, then remove it.
 *
 * NOTE(review): `error` is never modified, so this test always reports
 * "passed" -- confirm whether the handler is expected to flag failures.
 * NOTE(review): cyh_level is not initialized here; if this port's
 * cyc_handler_t carries that field (as other cyclic users do), an
 * indeterminate value reaches cyclic_add() -- verify.
 */
static void
cyclic_test_001(void)
{
	int error = 0;
	cyc_handler_t handler;
	cyc_time_t firetime;
	cyclic_id_t cycid;

	printf("%s: starting\n", __func__);

	handler.cyh_func = (cyc_func_t) cyclic_test_001_func;
	handler.cyh_arg = 0;

	firetime.cyt_when = 0;			/* fire as soon as possible */
	firetime.cyt_interval = 1000000000;	/* 1e9 ns = one second */

	nanotime(&test_001_start);

	/* cpu_lock must be held across cyclic_add()/cyclic_remove(). */
	mutex_enter(&cpu_lock);
	cycid = cyclic_add(&handler, &firetime);
	mutex_exit(&cpu_lock);

	/* Let the cyclic fire (presumably ~1.2s if DELAY() is in us). */
	DELAY(1200000);

	mutex_enter(&cpu_lock);
	cyclic_remove(cycid);
	mutex_exit(&cpu_lock);

	printf("%s: %s\n", __func__, error == 0 ? "passed" : "failed");
}
Esempio n. 4
0
/*
 * Start the watchdog's cyclic-driven timer.  Programs the first expiry
 * one interval from now and installs the cyclic unless one is already
 * installed (ntwdt_cycl_id != CYCLIC_NONE).
 */
static void
ntwdt_start_timer(ntwdt_state_t *ntwdt_ptr)
{
	ntwdt_runstate_t *rstate = ntwdt_ptr->ntwdt_run_state;
	cyc_handler_t *cyclic_hdlr = &rstate->ntwdt_cycl_hdlr;
	cyc_time_t *cyclic_time = &rstate->ntwdt_cycl_time;

	/* First expiry is one full interval from the current hrtime. */
	cyclic_time->cyt_interval = rstate->ntwdt_cyclic_interval;
	cyclic_time->cyt_when = gethrtime() + cyclic_time->cyt_interval;

	/* Reset run-state flags before the cyclic can fire. */
	rstate->ntwdt_watchdog_expired = 0;
	rstate->ntwdt_timer_running = 1;

	/* cpu_lock protects cyclic_add(); add only if not already armed. */
	mutex_enter(&cpu_lock);
	if (ntwdt_ptr->ntwdt_cycl_id == CYCLIC_NONE)
		ntwdt_ptr->ntwdt_cycl_id = cyclic_add(cyclic_hdlr, cyclic_time);
	mutex_exit(&cpu_lock);

	NTWDT_DBG(NTWDT_DBG_NTWDT, ("cyclic-driven timer is started"));
}
/**
 * Starts a suspended timer via the Solaris cyclic subsystem.
 *
 * Only periodic timers are supported (interval != 0); one-shot timers
 * are rejected up front with VERR_NOT_SUPPORTED.
 *
 * @param pTimer    The timer to start; must be valid and suspended.
 * @param u64First  Nanoseconds until the first firing (single-CPU path).
 * @returns VINF_SUCCESS, VERR_TIMER_ACTIVE if not suspended,
 *          VERR_NOT_SUPPORTED for one-shot timers, VERR_NO_MEMORY on
 *          allocation failure, VERR_CPU_OFFLINE if a requested specific
 *          CPU is offline.
 */
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    RTTIMER_ASSERT_VALID_RET(pTimer);
    RT_ASSERT_INTS_ON();

    if (!pTimer->fSuspended)
        return VERR_TIMER_ACTIVE;

    /* One-shot timers are not supported by the cyclic system. */
    if (pTimer->interval == 0)
        return VERR_NOT_SUPPORTED;

    if (pTimer->fAllCpu)
    {
        PRTR0OMNITIMERSOL pOmniTimer = RTMemAllocZ(sizeof(RTR0OMNITIMERSOL));
        if (RT_UNLIKELY(!pOmniTimer))
            return VERR_NO_MEMORY;

        pOmniTimer->au64Ticks = RTMemAllocZ(RTMpGetCount() * sizeof(uint64_t));
        if (RT_UNLIKELY(!pOmniTimer->au64Ticks))
        {
            RTMemFree(pOmniTimer);
            return VERR_NO_MEMORY;
        }

        /*
         * Setup omni (all CPU) timer. The Omni-CPU online event will fire
         * and from there we setup periodic timers per CPU.
         */
        pTimer->pOmniTimer = pOmniTimer;
        pOmniTimer->u64When     = pTimer->interval + RTTimeNanoTS();

        cyc_omni_handler_t hOmni;
        hOmni.cyo_online        = rtTimerSolOmniCpuOnline;
        hOmni.cyo_offline       = NULL;
        hOmni.cyo_arg           = pTimer;

        /*
         * BUG FIX: fSuspended is cleared only after every fallible step.
         * Previously it was cleared before the allocations, so a failed
         * start left the timer marked active and every later
         * RTTimerStart() call failed with VERR_TIMER_ACTIVE.
         */
        pTimer->fSuspended = false;

        mutex_enter(&cpu_lock);
        pTimer->hCyclicId = cyclic_add_omni(&hOmni);
        mutex_exit(&cpu_lock);
    }
    else
    {
        int iCpu = SOL_TIMER_ANY_CPU;
        if (pTimer->fSpecificCpu)
        {
            iCpu = pTimer->iCpu;
            if (!RTMpIsCpuOnline(iCpu))    /* ASSUMES: index == cpuid */
                return VERR_CPU_OFFLINE;
        }

        PRTR0SINGLETIMERSOL pSingleTimer = RTMemAllocZ(sizeof(RTR0SINGLETIMERSOL));
        if (RT_UNLIKELY(!pSingleTimer))
            return VERR_NO_MEMORY;

        pTimer->pSingleTimer = pSingleTimer;
        pSingleTimer->hHandler.cyh_func  = rtTimerSolCallbackWrapper;
        pSingleTimer->hHandler.cyh_arg   = pTimer;
        pSingleTimer->hHandler.cyh_level = CY_LOCK_LEVEL;

        mutex_enter(&cpu_lock);
        /* Re-check under cpu_lock that the target CPU is still online. */
        if (iCpu != SOL_TIMER_ANY_CPU && !cpu_is_online(cpu[iCpu]))
        {
            mutex_exit(&cpu_lock);
            RTMemFree(pSingleTimer);
            pTimer->pSingleTimer = NULL;
            return VERR_CPU_OFFLINE;
        }

        pSingleTimer->hFireTime.cyt_when = u64First + RTTimeNanoTS();
        /*
         * interval == 0 was rejected above with VERR_NOT_SUPPORTED, so the
         * former one-shot branch (cyt_interval = LLONG_MAX - cyt_when) was
         * unreachable dead code and has been removed.
         */
        pSingleTimer->hFireTime.cyt_interval = pTimer->interval;

        /* As in the omni path: clear fSuspended only once nothing can fail. */
        pTimer->fSuspended = false;
        pTimer->hCyclicId = cyclic_add(&pSingleTimer->hHandler, &pSingleTimer->hFireTime);
        if (iCpu != SOL_TIMER_ANY_CPU)
            cyclic_bind(pTimer->hCyclicId, cpu[iCpu], NULL /* cpupart */);

        mutex_exit(&cpu_lock);
    }

    return VINF_SUCCESS;
}
Esempio n. 6
0
/*
 * Bring up the RMC serial device: online the hardware, fetch interrupt
 * block cookies, initialize the protocol/hardware mutexes, install the
 * soft and (if present) hard interrupt handlers, and finally arm the
 * 5-second polling cyclic.
 *
 * Returns 0 on success, -1 on any failure (with everything installed so
 * far torn down again via the goto-cleanup chain below).
 */
int
rmc_comm_serdev_init(struct rmc_comm_state *rcs, dev_info_t *dip)
{
	cyc_handler_t cychand;
	cyc_time_t cyctime;

	rcs->sd_state.cycid = CYCLIC_NONE;

	/*
	 *  Online the hardware ...
	 */
	if (rmc_comm_online(rcs, dip) != 0)
		return (-1);

	/*
	 * call ddi_get_soft_iblock_cookie() to retrieve the interrupt
	 * block cookie so that the mutexes are initialized before adding
	 * the interrupt (to avoid a potential race condition).
	 */
	if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_LOW,
	    &rcs->dp_state.dp_iblk) != DDI_SUCCESS)
		return (-1);

	if (ddi_get_iblock_cookie(dip, 0, &rcs->sd_state.hw_iblk) !=
	    DDI_SUCCESS)
		return (-1);

	/*
	 * initialize mutexes here before adding hw/sw interrupt handlers
	 */
	mutex_init(rcs->dp_state.dp_mutex, NULL, MUTEX_DRIVER,
	    rcs->dp_state.dp_iblk);
	mutex_init(rcs->sd_state.hw_mutex, NULL, MUTEX_DRIVER,
	    rcs->sd_state.hw_iblk);

	/*
	 * Install the soft interrupt handler; it needs the data protocol
	 * lock (dp_mutex), which is why that mutex and its iblock cookie
	 * were created above.
	 */
	if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &rcs->sd_state.softid,
	    &rcs->dp_state.dp_iblk, NULL, rmc_comm_softint,
	    (caddr_t)rcs) != DDI_SUCCESS)
		goto fail_mutexes;

	/*
	 * Hardware interrupt, only when a SIO handle exists.
	 */
	if (rcs->sd_state.sio_handle != NULL) {
		if (ddi_add_intr(dip, 0, &rcs->sd_state.hw_iblk, NULL,
		    rmc_comm_hi_intr, (caddr_t)rcs) != DDI_SUCCESS)
			goto fail_softintr;
	}

	/*
	 * Start cyclic callbacks
	 */
	cychand.cyh_func = rmc_comm_cyclic;
	cychand.cyh_arg = rcs;
	cychand.cyh_level = CY_LOW_LEVEL;
	cyctime.cyt_when = 0;			/* from the next second	*/
	cyctime.cyt_interval = 5*RMC_COMM_ONE_SEC; /* call at 5s intervals */
	mutex_enter(&cpu_lock);
	rcs->sd_state.cycid = cyclic_add(&cychand, &cyctime);
	mutex_exit(&cpu_lock);

	return (0);

fail_softintr:
	ddi_remove_softintr(rcs->sd_state.softid);
fail_mutexes:
	mutex_destroy(rcs->dp_state.dp_mutex);
	mutex_destroy(rcs->sd_state.hw_mutex);
	return (-1);
}
/**
 * Starts (or restarts) a timer using the Solaris cyclic subsystem.
 *
 * May be used to restart a timer that suspended itself from its own
 * callback (fSuspendedFromTimer), in which case the stale cyclic is
 * removed here first; it may NOT be called from the callback itself.
 *
 * @param pTimer    The timer to start; must be valid.
 * @param u64First  Nanoseconds until the first firing; 0 means one full
 *                  interval (omni path -- single-CPU path uses it as-is).
 * @returns VINF_SUCCESS, VERR_TIMER_ACTIVE if already running,
 *          VERR_INVALID_CONTEXT when called from the timer callback,
 *          VERR_CPU_OFFLINE if the requested specific CPU is offline.
 */
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    RTTIMER_ASSERT_VALID_RET(pTimer);
    RT_ASSERT_INTS_ON();

    /*
     * It's not possible to restart a one-shot time from it's callback function,
     * at least not at the moment.
     */
    AssertReturn(!rtTimerSolIsCallingFromTimerProc(pTimer), VERR_INVALID_CONTEXT);

    /* cpu_lock serializes cyclic_add/remove/bind/reprogram below. */
    mutex_enter(&cpu_lock);

    /*
     * Make sure it's not active already.  If it was suspended from a timer
     * callback function, we need to do some cleanup work here before we can
     * restart the timer.
     */
    if (!pTimer->fSuspended)
    {
        if (!pTimer->fSuspendedFromTimer)
        {
            mutex_exit(&cpu_lock);
            return VERR_TIMER_ACTIVE;
        }
        /* Drop the cyclic left behind by the from-callback suspension. */
        cyclic_remove(pTimer->hCyclicId);
        pTimer->hCyclicId = CYCLIC_NONE;
    }

    /* Reset run-state flags before arming the new cyclic. */
    pTimer->fSuspended = false;
    pTimer->fSuspendedFromTimer = false;
    pTimer->fIntervalChanged = false;
    if (pTimer->fAllCpus)
    {
        /*
         * Setup omni (all CPU) timer. The Omni-CPU online event will fire
         * and from there we setup periodic timers per CPU.
         */
        pTimer->u.Omni.u64When  = RTTimeSystemNanoTS() + (u64First ? u64First : pTimer->cNsInterval);

        cyc_omni_handler_t HandlerOmni;
        HandlerOmni.cyo_online  = rtTimerSolOmniCpuOnline;
        HandlerOmni.cyo_offline = NULL;
        HandlerOmni.cyo_arg     = pTimer;

        pTimer->hCyclicId = cyclic_add_omni(&HandlerOmni);
    }
    else
    {
        cyc_handler_t Handler;
        cyc_time_t    FireTime;

        /*
         * Setup a single CPU timer.   If a specific CPU was requested, it
         * must be online or the timer cannot start.
         */
        if (   pTimer->fSpecificCpu
            && !RTMpIsCpuOnline(pTimer->iCpu)) /* ASSUMES: index == cpuid */
        {
            /* Re-mark suspended: the flags above were already cleared. */
            pTimer->fSuspended = true;

            mutex_exit(&cpu_lock);
            return VERR_CPU_OFFLINE;
        }

        Handler.cyh_func  = (cyc_func_t)rtTimerSolSingleCallbackWrapper;
        Handler.cyh_arg   = pTimer;
        Handler.cyh_level = CY_LOCK_LEVEL;

        /*
         * Use a large interval (1 hour) so that we don't get a timer-callback between
         * cyclic_add() and cyclic_bind(). Program the correct interval once cyclic_bind() is done.
         * See @bugref{7691#c20}.
         */
        if (!pTimer->fSpecificCpu)
            FireTime.cyt_when = RTTimeSystemNanoTS() + u64First;
        else
            FireTime.cyt_when = RTTimeSystemNanoTS() + u64First + RT_NS_1HOUR;
        FireTime.cyt_interval = pTimer->cNsInterval != 0
                              ? pTimer->cNsInterval
                              : CY_INFINITY /* Special value, see cyclic_fire(). */;
        /* Fresh tick bookkeeping for the new run. */
        pTimer->u.Single.u64Tick = 0;
        pTimer->u.Single.nsNextTick = 0;

        pTimer->hCyclicId = cyclic_add(&Handler, &FireTime);
        if (pTimer->fSpecificCpu)
        {
            /* Bind first, then reprogram the real first-fire time (see above). */
            cyclic_bind(pTimer->hCyclicId, cpu[pTimer->iCpu], NULL /* cpupart */);
            cyclic_reprogram(pTimer->hCyclicId, RTTimeSystemNanoTS() + u64First);
        }
    }

    mutex_exit(&cpu_lock);
    return VINF_SUCCESS;
}
/*
 * DDI attach(9E) entry point for the ztdummy driver.
 *
 * Allocates per-instance soft state, runs driver initialization, and
 * arms a 1ms cyclic that drives ztdummy_timer().
 *
 * Fixes vs. the previous revision: removed the unused locals `status`
 * and `getdev_name`, return DDI_SUCCESS instead of a bare 0, and
 * corrected the misspelled "intialize" diagnostic.
 *
 * Returns DDI_SUCCESS on success, DDI_FAILURE otherwise.
 */
static int ztdummy_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    struct ztdummy_state *ztd;
    int instance;
    cyc_time_t when;
    cyc_handler_t hdlr;

    /* Only a cold DDI_ATTACH is supported; resume paths are rejected. */
    switch (cmd) {
    case DDI_RESUME:
        cmn_err(CE_CONT, "ztdummy: Ignoring attach_RESUME");
        return DDI_FAILURE;
    case DDI_PM_RESUME:
        cmn_err(CE_CONT, "ztdummy: Ignoring attach_PM_RESUME");
        return DDI_FAILURE;
    case DDI_ATTACH:
        break;
    default:
        cmn_err(CE_CONT, "ztdummy: unknown attach command %d", cmd);
        return DDI_FAILURE;
    }

    instance = ddi_get_instance(dip);

    if (ddi_soft_state_zalloc(ztdummy_statep, instance) != DDI_SUCCESS)
    {
        cmn_err(CE_CONT, "ztdummy%d: Failed to alloc soft state", instance);
        return DDI_FAILURE;
    }

    /* Get pointer to that memory */
    ztd = ddi_get_soft_state(ztdummy_statep, instance);
    if (ztd == NULL) {
        cmn_err(CE_CONT, "ztdummy: Unable to allocate memory\n");
        ddi_soft_state_free(ztdummy_statep, instance);
        return DDI_FAILURE;
    }

    ztd->dip = dip;

    if (ztdummy_initialize(ztd)) {
        cmn_err(CE_CONT, "ztdummy: Unable to initialize zaptel driver\n");
        ddi_soft_state_free(ztdummy_statep, instance);
        return DDI_FAILURE;
    }

    /*
     * Setup a high-resolution timer using an undocumented API in the kernel
     *
     * For more information visit the URL below:
     * http://blogs.sun.com/roller/page/eschrock?entry=inside_the_cyclic_subsystem
     */
    hdlr.cyh_func = ztdummy_timer;
    hdlr.cyh_arg = 0;
    hdlr.cyh_level = CY_LOW_LEVEL;

    when.cyt_when = 0;           /* start as soon as possible */
    when.cyt_interval = 1000000; /* every 1ms */

    /* cpu_lock must be held across cyclic_add(). */
    mutex_enter(&cpu_lock);
    ztd->cyclic = cyclic_add(&hdlr, &when);
    mutex_exit(&cpu_lock);

    if (debug)
        cmn_err(CE_CONT, "ztdummy: init() finished\n");
    return DDI_SUCCESS;
}