Example #1
static void
ecc_x3400_stop(device_t dev)
{
	struct ecc_x3400_softc *sc = device_get_softc(dev);

	callout_stop_sync(&sc->ecc_callout);
}
Example #2
/*
 * Start or restart a timeout.  Installs the callout structure on the
 * callwheel.  Callers may legally pass any value, even 0 or negative,
 * but since the sc->curticks index may already have been processed, a
 * minimum timeout of 1 tick will be enforced.
 *
 * This function will block if the callout is currently queued to a different
 * cpu or the callback is currently running in another thread.
 */
void
callout_reset(struct callout *c, int to_ticks, void (*ftn)(void *), void *arg)
{
	softclock_pcpu_t sc;
	globaldata_t gd;

#ifdef INVARIANTS
	if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
		callout_init(c);
		kprintf(
		    "callout_reset(%p) from %p: callout was not initialized\n",
		    c, ((int **)&c)[-1]);
		print_backtrace(-1);
	}
#endif
	gd = mycpu;
	sc = &softclock_pcpu_ary[gd->gd_cpuid];
	crit_enter_gd(gd);

	/*
	 * Our cpu must gain ownership of the callout and cancel anything
	 * still running, which is complex.  The easiest way to do it is to
	 * issue a callout_stop_sync().
	 *
	 * Clearing bits in the expected flags value guarantees they are not
	 * set, since the cmpset atomic op will fail otherwise.  PENDING and
	 * ARMED must not be set; if we find them set, we loop up and call
	 * stop_sync() again.
	 */
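	/*
	 * For example, if another cpu concurrently re-arms this callout
	 * right after the callout_stop_sync() in the loop below, PENDING
	 * or ARMED will be set again in c->c_flags.  Because those bits
	 * are cleared in the expected value 'flags', the cmpset cannot
	 * succeed and the loop stops the callout once more.
	 */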
	for (;;) {
		int flags;
		int nflags;

		callout_stop_sync(c);
		flags = c->c_flags & ~(CALLOUT_PENDING | CALLOUT_ARMED);
		nflags = (flags & ~(CALLOUT_CPU_MASK |
				    CALLOUT_EXECUTED)) |
			 CALLOUT_CPU_TO_FLAGS(gd->gd_cpuid) |
			 CALLOUT_ARMED |
			 CALLOUT_PENDING |
			 CALLOUT_ACTIVE;
		if (atomic_cmpset_int(&c->c_flags, flags, nflags))
			break;
	}

	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_func = ftn;
	c->c_time = sc->curticks + to_ticks;

	TAILQ_INSERT_TAIL(&sc->callwheel[c->c_time & cwheelmask],
			  c, c_links.tqe);
	crit_exit_gd(gd);
}
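
A minimal usage sketch of the API above (not taken from the tree): my_softc, my_tick, my_start and my_stop are hypothetical names, and the one-second period via hz is an arbitrary choice. It only illustrates the documented behaviour that callout_reset() both starts and restarts a timeout, clamps a non-positive tick count to 1, and is paired with callout_stop_sync() as in Examples #1 and #3.

struct my_softc {
	struct callout	my_callout;	/* hypothetical periodic timer */
};

static void
my_tick(void *arg)
{
	struct my_softc *sc = arg;

	/* ... periodic work ... */

	/* Re-arm for one second; a value <= 0 would still fire 1 tick later. */
	callout_reset(&sc->my_callout, hz, my_tick, sc);
}

static void
my_start(struct my_softc *sc)
{
	callout_init(&sc->my_callout);
	callout_reset(&sc->my_callout, hz, my_tick, sc);
}

static void
my_stop(struct my_softc *sc)
{
	/* Cancel and wait out a callback running on another cpu. */
	callout_stop_sync(&sc->my_callout);
}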
Example #3
static int
rdrand_detach(device_t dev)
{
	struct rdrand_softc *sc;

	sc = device_get_softc(dev);

	callout_stop_sync(&sc->sc_rng_co);

	return (0);
}
Example #4
int
ata_reinit(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_request *request;
    device_t *children;
    int nchildren, i;

    /* check that we have a valid channel to reinit */
    if (!ch || !ch->r_irq)
	return ENXIO;

    if (bootverbose)
	device_printf(dev, "reiniting channel ..\n");

    /* poll for locking the channel */
    while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
	tsleep(&dev, 0, "atarini", 1);

    /* catch eventual request in ch->running */
    lockmgr(&ch->state_mtx, LK_EXCLUSIVE);
    if ((request = ch->running))
	callout_stop_sync(&request->callout);
    ch->running = NULL;

    /* unconditionally grab the channel lock */
    ch->state |= ATA_STALL_QUEUE;
    lockmgr(&ch->state_mtx, LK_RELEASE);

    /* reset the controller HW, the channel and device(s) */
    ATA_RESET(dev);

    /* reinit the children and delete any that fail */
    if (!device_get_children(dev, &children, &nchildren)) {
	get_mplock();
	for (i = 0; i < nchildren; i++) {
	    /* did any children go missing? */
	    if (children[i] && device_is_attached(children[i]) &&
		ATA_REINIT(children[i])) {
		/*
		 * if we had a running request and its device matches
		 * this child, we need to inform the request that the
		 * device is gone.
		 */
		if (request && request->dev == children[i]) {
		    request->result = ENXIO;
		    device_printf(request->dev, "FAILURE - device detached\n");

		    /* if it didn't time out, finish the request here */
		    if (!(request->flags & ATA_R_TIMEOUT))
			    ata_finish(request);
		    request = NULL;
		}
		device_delete_child(dev, children[i]);
	    }
	}
	kfree(children, M_TEMP);
	rel_mplock();
    }

    /* if we still have a good request, put it on the queue again */
    if (request && !(request->flags & ATA_R_TIMEOUT)) {
	device_printf(request->dev,
		      "WARNING - %s requeued due to channel reset",
		      ata_cmd2str(request));
	if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
	    kprintf(" LBA=%ju", request->u.ata.lba);
	kprintf("\n");
	request->flags |= ATA_R_REQUEUE;
	ata_queue_request(request);
    }

    /* we're done, release the channel for new work */
    lockmgr(&ch->state_mtx, LK_EXCLUSIVE);
    ch->state = ATA_IDLE;
    lockmgr(&ch->state_mtx, LK_RELEASE);
    ATA_LOCKING(dev, ATA_LF_UNLOCK);

    if (bootverbose)
	device_printf(dev, "reinit done ..\n");

    /* kick off requests on the queue */
    ata_start(dev);
    return 0;
}
Example #5
/*
 * Set up a callout to run on the specified cpu.  Should generally be used
 * to run a callout on a specific cpu which does not nominally change.
 */
void
callout_reset_bycpu(struct callout *c, int to_ticks, void (*ftn)(void *),
		    void *arg, int cpuid)
{
	globaldata_t gd;
	globaldata_t tgd;

#ifdef INVARIANTS
	if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
		callout_init(c);
		kprintf(
		    "callout_reset(%p) from %p: callout was not initialized\n",
		    c, ((int **)&c)[-1]);
		print_backtrace(-1);
	}
#endif
	gd = mycpu;
	crit_enter_gd(gd);

	tgd = globaldata_find(cpuid);

	/*
	 * Our cpu must temporarily gain ownership of the callout and cancel
	 * anything still running, which is complex.  The easiest way to do
	 * it is to issue a callout_stop_sync().
	 *
	 * Clearing bits in flags (vs nflags) guarantees they were not
	 * previously set, by forcing the atomic op to fail.  The callout
	 * must not be pending or armed after the stop_sync; if it is, we
	 * have to loop up and stop_sync() again.
	 */
	for (;;) {
		int flags;
		int nflags;

		callout_stop_sync(c);
		flags = c->c_flags & ~(CALLOUT_PENDING | CALLOUT_ARMED);
		nflags = (flags & ~(CALLOUT_CPU_MASK |
				    CALLOUT_EXECUTED)) |
			 CALLOUT_CPU_TO_FLAGS(tgd->gd_cpuid) |
			 CALLOUT_ARMED |
			 CALLOUT_ACTIVE;
		nflags = nflags + 1;		/* bump IPI count */
		if (atomic_cmpset_int(&c->c_flags, flags, nflags))
			break;
		cpu_pause();
	}

	/*
	 * Even though we are not the cpu that now owns the callout, bumping
	 * the IPI count (while the callout is not queued to the callwheel)
	 * prevents anyone else from depending on or acting on the contents
	 * of the callout structure.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_func = ftn;
	c->c_load = to_ticks;	/* IPI will add curticks */

	lwkt_send_ipiq(tgd, callout_reset_ipi, c);
	crit_exit_gd(gd);
}
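
A minimal sketch of using the per-cpu variant above; my_softc, my_percpu_tick, my_percpu_start and the choice of cpu 0 are hypothetical. It relies on the behaviour visible in Example #2, where the callback runs on the cpu that owns the callwheel entry, so re-arming with a plain callout_reset() from the callback keeps the callout on the cpu originally selected with callout_reset_bycpu().

static void
my_percpu_tick(void *arg)
{
	struct my_softc *sc = arg;

	/* ... work that should stay on the chosen cpu ... */

	/*
	 * The callback runs on the cpu that owns the callwheel entry, so a
	 * plain callout_reset() here re-arms it on that same cpu.
	 */
	callout_reset(&sc->my_callout, hz, my_percpu_tick, sc);
}

static void
my_percpu_start(struct my_softc *sc)
{
	callout_init(&sc->my_callout);
	/* Arm the callout on cpu 0; the cpu choice here is only an example. */
	callout_reset_bycpu(&sc->my_callout, hz, my_percpu_tick, sc, 0);
}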