Example #1
void
flush_workqueue(struct workqueue_struct *wq)
{
	static const struct wq_flush zero_wqf;
	struct wq_flush wqf = zero_wqf;

	mutex_init(&wqf.wqf_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wqf.wqf_cv, "lnxwflsh");

	/*
	 * Only the single-item path below is live; the per-CPU branch
	 * in the else case is disabled (and panics) because per-CPU
	 * Linux workqueues are not supported yet.
	 */
	if (1) {
		struct wq_flush_work *const wqfw = kmem_zalloc(sizeof(*wqfw),
		    KM_SLEEP);

		wqf.wqf_n = 1;
		wqfw->wqfw_flush = &wqf;
		INIT_WORK(&wqfw->wqfw_work, &linux_wq_barrier);
		wqfw->wqfw_work.w_wq = wq;
		wqfw->wqfw_work.w_state = WORK_PENDING;
		workqueue_enqueue(wq->wq_workqueue, &wqfw->wqfw_work.w_wk,
		    NULL);
	} else {
		struct cpu_info *ci;
		CPU_INFO_ITERATOR cii;
		struct wq_flush_work *wqfw;

		panic("per-CPU Linux workqueues don't work yet!");

		wqf.wqf_n = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			wqfw = kmem_zalloc(sizeof(*wqfw), KM_SLEEP);
			mutex_enter(&wqf.wqf_lock);
			wqf.wqf_n++;
			mutex_exit(&wqf.wqf_lock);
			wqfw->wqfw_flush = &wqf;
			INIT_WORK(&wqfw->wqfw_work, &linux_wq_barrier);
			wqfw->wqfw_work.w_state = WORK_PENDING;
			wqfw->wqfw_work.w_wq = wq;
			workqueue_enqueue(wq->wq_workqueue,
			    &wqfw->wqfw_work.w_wk, ci);
		}
	}

	mutex_enter(&wqf.wqf_lock);
	while (0 < wqf.wqf_n)
		cv_wait(&wqf.wqf_cv, &wqf.wqf_lock);
	mutex_exit(&wqf.wqf_lock);

	cv_destroy(&wqf.wqf_cv);
	mutex_destroy(&wqf.wqf_lock);
}
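For context, linux_wq_barrier is the handler each barrier item above runs; it is not shown on this page. Below is a minimal sketch of what such a handler must do, assuming the wq_flush/wq_flush_work layout used above and a container_of macro: decrement wqf_n under wqf_lock, wake the flusher when the count reaches zero, and free the item.

static void
linux_wq_barrier(struct work_struct *work)
{
	/* Sketch only: recover the barrier item from its embedded
	 * work_struct, using the field names from the example above.  */
	struct wq_flush_work *const wqfw = container_of(work,
	    struct wq_flush_work, wqfw_work);
	struct wq_flush *const wqf = wqfw->wqfw_flush;

	mutex_enter(&wqf->wqf_lock);
	if (--wqf->wqf_n == 0)
		cv_broadcast(&wqf->wqf_cv); /* last one: wake the flusher */
	mutex_exit(&wqf->wqf_lock);

	kmem_free(wqfw, sizeof(*wqfw));
}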
Example #2
/**
 * dwc2_handle_conn_id_status_change_intr() - Handles the Connector ID Status
 * Change Interrupt
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * Reads the OTG Interrupt Register (GOTGCTL) to determine whether this is a
 * Device to Host Mode transition or a Host to Device Mode transition. This only
 * occurs when the cable is connected/removed from the PHY connector.
 */
static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg)
{
    u32 gintmsk = DWC2_READ_4(hsotg, GINTMSK);

    /* Need to disable SOF interrupt immediately */
    gintmsk &= ~GINTSTS_SOF;
    DWC2_WRITE_4(hsotg, GINTMSK, gintmsk);

    dev_dbg(hsotg->dev, " ++Connector ID Status Change Interrupt++  (%s)\n",
            dwc2_is_host_mode(hsotg) ? "Host" : "Device");

    /*
     * Need to schedule work, as there may be DELAY function calls.
     * Release the lock before enqueuing onto the workqueue, which
     * takes a spinlock of its own while scheduling.
     */
    if (hsotg->wq_otg) {
        spin_unlock(&hsotg->lock);
        workqueue_enqueue(hsotg->wq_otg, &hsotg->wf_otg, NULL);
        spin_lock(&hsotg->lock);
    }

    /* Clear interrupt */
    DWC2_WRITE_4(hsotg, GINTSTS, GINTSTS_CONIDSTSCHNG);
}
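hsotg->wq_otg here is a native NetBSD workqueue(9) queue, so it must have been created earlier with workqueue_create(9). A minimal sketch of that setup; the worker name dwc2_conn_id_status_change and the priority/IPL values are illustrative assumptions, not the driver's actual choices:

static int
dwc2_create_otg_wq(struct dwc2_hsotg *hsotg)
{
	/* The registered worker (assumed name) receives &hsotg->wf_otg
	 * whenever the interrupt handler above enqueues it.  A real
	 * driver would pick an IPL high enough for its enqueue context. */
	return workqueue_create(&hsotg->wq_otg, "dwc2otg",
	    dwc2_conn_id_status_change, hsotg, PRI_NONE, IPL_NONE,
	    WQ_MPSAFE);
}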
Example #3
void tp_wake_all_threads(struct thread_pool *pool)
{
    int i;
    /* Wake every worker by enqueuing one sentinel (arg_invalid) item
     * per thread; Example #11 enqueues the same sentinel to make the
     * workers exit. */
    for (i = 0; i < pool->num_threads; i++) {
        workqueue_enqueue(pool->queue, NULL, 0, arg_invalid, arg_invalid);
    }
}
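Note: this example and Example #11 call an application-local, five-argument workqueue_enqueue belonging to a userspace thread pool; it is unrelated to the three-argument NetBSD kernel workqueue_enqueue(9) used by the other examples on this page.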
Example #4
int
nfs_inactive(void *v)
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	struct nfsnode *np;
	struct sillyrename *sp;
	struct vnode *vp = ap->a_vp;

	np = VTONFS(vp);
	if (vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = NULL;
	} else
		sp = NULL;
	if (sp != NULL)
		nfs_vinvalbuf(vp, 0, sp->s_cred, curlwp, 1);
	*ap->a_recycle = (np->n_flag & NREMOVED) != 0;
	np->n_flag &=
	    (NMODIFIED | NFLUSHINPROG | NFLUSHWANT | NEOFVALID | NTRUNCDELAYED);

	if (vp->v_type == VDIR && np->n_dircache)
		nfs_invaldircache(vp,
		    NFS_INVALDIRCACHE_FORCE | NFS_INVALDIRCACHE_KEEPEOF);

	VOP_UNLOCK(vp);

	if (sp != NULL) {
		workqueue_enqueue(nfs_sillyworkq, &sp->s_work, NULL);
	}

	return (0);
}
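The nfs_sillyworkq used above is created once when the NFS client initializes. A minimal sketch with workqueue_create(9); the worker name nfs_sillyworker is an assumption:

static struct workqueue *nfs_sillyworkq;

static int
nfs_sillyworkq_init(void)
{
	/* nfs_sillyworker (assumed name) performs the deferred
	 * silly-rename cleanup queued through sp->s_work above. */
	return workqueue_create(&nfs_sillyworkq, "nfssilly",
	    nfs_sillyworker, NULL, PRI_NONE, IPL_NONE, 0);
}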
Example #5
static void
linux_worker_intr(void *arg)
{
	struct delayed_work *dw = arg;
	struct workqueue_struct *wq;

	linux_work_lock(&dw->work);

	KASSERT((dw->work.w_state == WORK_DELAYED) ||
	    (dw->work.w_state == WORK_DELAYED_CANCELLED));

	wq = dw->work.w_wq;
	mutex_enter(&wq->wq_lock);

	/* Queue the work, or return it to idle and alert any cancellers.  */
	if (__predict_true(dw->work.w_state == WORK_DELAYED)) {
		dw->work.w_state = WORK_PENDING;
		workqueue_enqueue(dw->work.w_wq->wq_workqueue, &dw->work.w_wk,
		    NULL);
	} else {
		KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);
		dw->work.w_state = WORK_IDLE;
		dw->work.w_wq = NULL;
		cv_broadcast(&wq->wq_cv);
	}

	/* Either way, the callout is done.  */
	/* Use the wq saved above: w_wq may have just been cleared.  */
	TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
	callout_destroy(&dw->dw_callout);

	mutex_exit(&wq->wq_lock);
	linux_work_unlock(&dw->work);
}
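The callout that fires linux_worker_intr is armed with callout_reset by queue_delayed_work and mod_delayed_work, shown in Examples #6 and #7 below.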
Example #6
bool
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	bool newly_queued;

	KASSERT(wq != NULL);

	linux_work_lock(&dw->work);
	switch (dw->work.w_state) {
	case WORK_IDLE:
	case WORK_INVOKED:
		if (ticks == 0) {
			/* Skip the delay and queue it now.  */
			dw->work.w_state = WORK_PENDING;
			dw->work.w_wq = wq;
			workqueue_enqueue(wq->wq_workqueue, &dw->work.w_wk,
			    NULL);
		} else {
			callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
			callout_reset(&dw->dw_callout, ticks,
			    &linux_worker_intr, dw);
			dw->work.w_state = WORK_DELAYED;
			dw->work.w_wq = wq;
			mutex_enter(&wq->wq_lock);
			TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
			mutex_exit(&wq->wq_lock);
		}
		newly_queued = true;
		break;

	case WORK_DELAYED:
		/*
		 * Timer is already ticking.  Leave it to time out
		 * whenever it was going to time out, as Linux does --
		 * neither speed it up nor postpone it.
		 */
		newly_queued = false;
		break;

	case WORK_PENDING:
		KASSERT(dw->work.w_wq == wq);
		newly_queued = false;
		break;

	case WORK_CANCELLED:
	case WORK_DELAYED_CANCELLED:
		/* XXX Wait for cancellation and then queue?  */
		newly_queued = false;
		break;

	default:
		panic("delayed work %p in bad state: %d", dw,
		    (int)dw->work.w_state);
		break;
	}
	linux_work_unlock(&dw->work);

	return newly_queued;
}
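A caller-side sketch for queue_delayed_work, assuming the compat layer provides the usual INIT_DELAYED_WORK initializer (all names below are illustrative; hz is the kernel's ticks-per-second):

static struct delayed_work example_dw;

static void
example_tick(struct work_struct *work)
{
	/* ... deferred work goes here ... */
}

static void
example_start(struct workqueue_struct *wq)
{
	INIT_DELAYED_WORK(&example_dw, &example_tick);
	/* Run example_tick on wq about one second (hz ticks) from now. */
	(void)queue_delayed_work(wq, &example_dw, hz);
}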
Example #7
bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	bool timer_modified;

	KASSERT(wq != NULL);

	linux_work_lock(&dw->work);
	switch (dw->work.w_state) {
	case WORK_IDLE:
	case WORK_INVOKED:
		if (ticks == 0) {
			/* Skip the delay and queue it now.  */
			dw->work.w_state = WORK_PENDING;
			dw->work.w_wq = wq;
			workqueue_enqueue(wq->wq_workqueue, &dw->work.w_wk,
			    NULL);
		} else {
			callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
			callout_reset(&dw->dw_callout, ticks,
			    &linux_worker_intr, dw);
			dw->work.w_state = WORK_DELAYED;
			dw->work.w_wq = wq;
			mutex_enter(&wq->wq_lock);
			TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
			mutex_exit(&wq->wq_lock);
		}
		timer_modified = false;
		break;

	case WORK_DELAYED:
		/*
		 * Timer is already ticking.  Reschedule it.
		 */
		callout_schedule(&dw->dw_callout, ticks);
		timer_modified = true;
		break;

	case WORK_PENDING:
		KASSERT(dw->work.w_wq == wq);
		timer_modified = false;
		break;

	case WORK_CANCELLED:
	case WORK_DELAYED_CANCELLED:
		/* XXX Wait for cancellation and then queue?  */
		timer_modified = false;
		break;

	default:
		panic("delayed work %p in bad state: %d", dw,
		    (int)dw->work.w_state);
		break;
	}
	linux_work_unlock(&dw->work);

	return timer_modified;
}
Example #8
static void
emdtv_ir_intr(struct usbd_xfer *xfer, void *priv,
    usbd_status status)
{
	struct emdtv_softc *sc = priv;
	uint32_t len;

	usbd_get_xfer_status(xfer, NULL, NULL, &len, NULL);
	if (status == USBD_CANCELLED)
		return;

	if (sc->sc_ir_wq)
		workqueue_enqueue(sc->sc_ir_wq, &sc->sc_ir_work, NULL);
}
Example #9
static void
physio_biodone(struct buf *bp)
{
#if defined(DIAGNOSTIC)
	struct physio_stat *ps = bp->b_private;
	size_t todo = bp->b_bufsize;

	KASSERT(ps->ps_running > 0);
	KASSERT(bp->b_bcount <= todo);
	KASSERT(bp->b_resid <= bp->b_bcount);
#endif /* defined(DIAGNOSTIC) */

	workqueue_enqueue(physio_workqueue, &bp->b_work, NULL);
}
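physio_workqueue itself is created once at initialization with workqueue_create(9). A sketch, assuming a completion worker (here called physio_done) that finishes the buffer handed over via bp->b_work; the priority and IPL are plausible for block I/O but illustrative:

static struct workqueue *physio_workqueue;

static int
physio_wq_init(void)
{
	/* physio_done is an assumed name for the completion worker. */
	return workqueue_create(&physio_workqueue, "physiod",
	    physio_done, NULL, PRI_BIO, IPL_BIO, WQ_MPSAFE);
}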
Example #10
bool
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	/* True if we put it on the queue, false if it was already there.  */
	bool newly_queued;

	KASSERT(wq != NULL);

	linux_work_lock(work);
	switch (work->w_state) {
	case WORK_IDLE:
	case WORK_INVOKED:
		work->w_state = WORK_PENDING;
		work->w_wq = wq;
		workqueue_enqueue(wq->wq_workqueue, &work->w_wk, NULL);
		newly_queued = true;
		break;

	case WORK_DELAYED:
		panic("queue_work(delayed work %p)", work);
		break;

	case WORK_PENDING:
		KASSERT(work->w_wq == wq);
		newly_queued = false;
		break;

	case WORK_CANCELLED:
		newly_queued = false;
		break;

	case WORK_DELAYED_CANCELLED:
		panic("queue_work(delayed work %p)", work);
		break;

	default:
		panic("work %p in bad state: %d", work, (int)work->w_state);
		break;
	}
	linux_work_unlock(work);

	return newly_queued;
}
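A minimal caller for queue_work, using the same INIT_WORK initializer that Example #1 uses (names are illustrative):

static struct work_struct example_work;

static void
example_worker(struct work_struct *work)
{
	/* ... runs once on a workqueue thread ... */
}

static void
example_submit(struct workqueue_struct *wq)
{
	INIT_WORK(&example_work, &example_worker);
	(void)queue_work(wq, &example_work); /* false if already pending */
}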
Example #11
int tp_destroy_thread_pool(struct thread_pool *pool)
{
    int i = 0;

    printf("start to destroy thread_pool \"%s\" ...\n", pool->name);

    /* Tell every worker to exit: enqueue one sentinel (arg_invalid)
     * item per thread; a worker that dequeues it terminates. */
    for (i = 0; i < pool->num_threads; i++) {
        workqueue_enqueue(pool->queue, NULL, 0, arg_invalid, arg_invalid);
    }

    /* Wait for every worker thread to terminate. */
    for (i = 0; i < pool->num_threads; i++) {
        pthread_join(pool->thread[i], NULL);
    }

    /* The per-thread semaphores and the bitmap are no longer used;
     * only the pool mutex is left to destroy. */
    pthread_mutex_destroy(&pool->mutex);

    if (pool->queue) {
        destroy_queue(pool->queue);
        pool->queue = NULL;
    }

    printf("thread_pool \"%s\" released...\n", pool->name);
    free(pool);

    return i;                   // return num of threads terminated
}
Example #12
/*
 * dmio_usrreq_fini:
 *
 *	Tear down a request.  Must be called at splsoftclock().
 */
static void
dmio_usrreq_fini(struct dmio_state *ds, struct dmio_usrreq_state *dus)
{
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	int i;

	if (uio_out->uio_iov != NULL)
		free(uio_out->uio_iov, M_TEMP);

	if (dses->dses_ninputs) {
		for (i = 0; i < dses->dses_ninputs; i++) {
			uio_in = &dus->dus_uio_in[i];
			free(uio_in->uio_iov, M_TEMP);
		}
		free(dus->dus_uio_in, M_TEMP);
	}

	workqueue_enqueue(dmio_cleaner, &dus->dus_work, NULL);
}